repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null)
---|---|---|---|---|
GunoH/intellij-community | refs/heads/master | python/testData/formatter/indentAfterBackslash_after.py | 79 | foo = bar \
if bar is not None else None
|
40223142/2015cad0623 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/multiprocessing/util.py | 696 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
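# Typical usage (illustrative): turn on multiprocessing logging at DEBUG level
# so that messages such as 'process shutting down' are printed to stderr:
#   import multiprocessing, logging
#   logger = multiprocessing.log_to_stderr(logging.DEBUG)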
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
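# Illustrative example (the ``conn`` object and its reopen() method are
# hypothetical): have a child process refresh a resource after a fork, the
# same pattern ForkAwareThreadLock uses below to register its _reset method:
#   register_after_fork(conn, lambda obj: obj.reopen())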
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
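# Illustrative example: get_temp_dir() above registers
#   Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
# so the temporary directory is removed at exit; passing a live object instead
# of None ties the callback to that object's garbage collection via a weakref.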
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
|
omtinez/micropython | refs/heads/master | tools/gendoc.py | 101 | """
Generate documentation for pyboard API from C files.
"""
import os
import argparse
import re
import markdown
# given a list of (name,regex) pairs, find the first one that matches the given line
def re_match_first(regexs, line):
for name, regex in regexs:
match = re.match(regex, line)
if match:
return name, match
return None, None
def makedirs(d):
if not os.path.isdir(d):
os.makedirs(d)
class Lexer:
class LexerError(Exception):
pass
class EOF(Exception):
pass
class Break(Exception):
pass
def __init__(self, file):
self.filename = file
with open(file, 'rt') as f:
line_num = 0
lines = []
for line in f:
line_num += 1
line = line.strip()
if line == '///':
lines.append((line_num, ''))
elif line.startswith('/// '):
lines.append((line_num, line[4:]))
elif len(lines) > 0 and lines[-1][1] is not None:
lines.append((line_num, None))
if len(lines) > 0 and lines[-1][1] is not None:
lines.append((line_num, None))
self.cur_line = 0
self.lines = lines
def opt_break(self):
if len(self.lines) > 0 and self.lines[0][1] is None:
self.lines.pop(0)
def next(self):
if len(self.lines) == 0:
raise Lexer.EOF
else:
l = self.lines.pop(0)
self.cur_line = l[0]
if l[1] is None:
raise Lexer.Break
else:
return l[1]
def error(self, msg):
print('({}:{}) {}'.format(self.filename, self.cur_line, msg))
raise Lexer.LexerError
class MarkdownWriter:
def __init__(self):
pass
def start(self):
self.lines = []
def end(self):
return '\n'.join(self.lines)
def heading(self, level, text):
if len(self.lines) > 0:
self.lines.append('')
self.lines.append(level * '#' + ' ' + text)
self.lines.append('')
def para(self, text):
if len(self.lines) > 0 and self.lines[-1] != '':
self.lines.append('')
if isinstance(text, list):
self.lines.extend(text)
elif isinstance(text, str):
self.lines.append(text)
else:
assert False
self.lines.append('')
def single_line(self, text):
self.lines.append(text)
def module(self, name, short_descr, descr):
self.heading(1, 'module {}'.format(name))
self.para(descr)
def function(self, ctx, name, args, descr):
proto = '{}.{}{}'.format(ctx, name, args)
self.heading(3, '`' + proto + '`')
self.para(descr)
def method(self, ctx, name, args, descr):
if name == '\\constructor':
proto = '{}{}'.format(ctx, args)
elif name == '\\call':
proto = '{}{}'.format(ctx, args)
else:
proto = '{}.{}{}'.format(ctx, name, args)
self.heading(3, '`' + proto + '`')
self.para(descr)
def constant(self, ctx, name, descr):
self.single_line('`{}.{}` - {}'.format(ctx, name, descr))
class ReStructuredTextWriter:
head_chars = {1:'=', 2:'-', 3:'.'}
def __init__(self):
pass
def start(self):
self.lines = []
def end(self):
return '\n'.join(self.lines)
def _convert(self, text):
return text.replace('`', '``').replace('*', '\\*')
def heading(self, level, text, convert=True):
if len(self.lines) > 0:
self.lines.append('')
if convert:
text = self._convert(text)
self.lines.append(text)
self.lines.append(len(text) * self.head_chars[level])
self.lines.append('')
def para(self, text, indent=''):
if len(self.lines) > 0 and self.lines[-1] != '':
self.lines.append('')
if isinstance(text, list):
for t in text:
self.lines.append(indent + self._convert(t))
elif isinstance(text, str):
self.lines.append(indent + self._convert(text))
else:
assert False
self.lines.append('')
def single_line(self, text):
self.lines.append(self._convert(text))
def module(self, name, short_descr, descr):
self.heading(1, ':mod:`{}` --- {}'.format(name, self._convert(short_descr)), convert=False)
self.lines.append('.. module:: {}'.format(name))
self.lines.append(' :synopsis: {}'.format(short_descr))
self.para(descr)
def function(self, ctx, name, args, descr):
args = self._convert(args)
self.lines.append('.. function:: ' + name + args)
self.para(descr, indent=' ')
def method(self, ctx, name, args, descr):
args = self._convert(args)
if name == '\\constructor':
self.lines.append('.. class:: ' + ctx + args)
elif name == '\\call':
self.lines.append('.. method:: ' + ctx + args)
else:
self.lines.append('.. method:: ' + ctx + '.' + name + args)
self.para(descr, indent=' ')
def constant(self, ctx, name, descr):
self.lines.append('.. data:: ' + name)
self.para(descr, indent=' ')
class DocValidateError(Exception):
pass
class DocItem:
def __init__(self):
self.doc = []
def add_doc(self, lex):
try:
while True:
line = lex.next()
if len(line) > 0 or len(self.doc) > 0:
self.doc.append(line)
except Lexer.Break:
pass
def dump(self, writer):
writer.para(self.doc)
class DocConstant(DocItem):
def __init__(self, name, descr):
super().__init__()
self.name = name
self.descr = descr
def dump(self, ctx, writer):
writer.constant(ctx, self.name, self.descr)
class DocFunction(DocItem):
def __init__(self, name, args):
super().__init__()
self.name = name
self.args = args
def dump(self, ctx, writer):
writer.function(ctx, self.name, self.args, self.doc)
class DocMethod(DocItem):
def __init__(self, name, args):
super().__init__()
self.name = name
self.args = args
def dump(self, ctx, writer):
writer.method(ctx, self.name, self.args, self.doc)
class DocClass(DocItem):
def __init__(self, name, descr):
super().__init__()
self.name = name
self.descr = descr
self.constructors = {}
self.classmethods = {}
self.methods = {}
self.constants = {}
def process_classmethod(self, lex, d):
name = d['id']
if name == '\\constructor':
dict_ = self.constructors
else:
dict_ = self.classmethods
if name in dict_:
lex.error("multiple definition of method '{}'".format(name))
method = dict_[name] = DocMethod(name, d['args'])
method.add_doc(lex)
def process_method(self, lex, d):
name = d['id']
dict_ = self.methods
if name in dict_:
lex.error("multiple definition of method '{}'".format(name))
method = dict_[name] = DocMethod(name, d['args'])
method.add_doc(lex)
def process_constant(self, lex, d):
name = d['id']
if name in self.constants:
lex.error("multiple definition of constant '{}'".format(name))
self.constants[name] = DocConstant(name, d['descr'])
lex.opt_break()
def dump(self, writer):
writer.heading(1, 'class {}'.format(self.name))
super().dump(writer)
if len(self.constructors) > 0:
writer.heading(2, 'Constructors')
for f in sorted(self.constructors.values(), key=lambda x:x.name):
f.dump(self.name, writer)
if len(self.classmethods) > 0:
writer.heading(2, 'Class methods')
for f in sorted(self.classmethods.values(), key=lambda x:x.name):
f.dump(self.name, writer)
if len(self.methods) > 0:
writer.heading(2, 'Methods')
for f in sorted(self.methods.values(), key=lambda x:x.name):
f.dump(self.name.lower(), writer)
if len(self.constants) > 0:
writer.heading(2, 'Constants')
for c in sorted(self.constants.values(), key=lambda x:x.name):
c.dump(self.name, writer)
class DocModule(DocItem):
def __init__(self, name, descr):
super().__init__()
self.name = name
self.descr = descr
self.functions = {}
self.constants = {}
self.classes = {}
self.cur_class = None
def new_file(self):
self.cur_class = None
def process_function(self, lex, d):
name = d['id']
if name in self.functions:
lex.error("multiple definition of function '{}'".format(name))
function = self.functions[name] = DocFunction(name, d['args'])
function.add_doc(lex)
#def process_classref(self, lex, d):
# name = d['id']
# self.classes[name] = name
# lex.opt_break()
def process_class(self, lex, d):
name = d['id']
if name in self.classes:
lex.error("multiple definition of class '{}'".format(name))
self.cur_class = self.classes[name] = DocClass(name, d['descr'])
self.cur_class.add_doc(lex)
def process_classmethod(self, lex, d):
self.cur_class.process_classmethod(lex, d)
def process_method(self, lex, d):
self.cur_class.process_method(lex, d)
def process_constant(self, lex, d):
if self.cur_class is None:
# a module-level constant
name = d['id']
if name in self.constants:
lex.error("multiple definition of constant '{}'".format(name))
self.constants[name] = DocConstant(name, d['descr'])
lex.opt_break()
else:
# a class-level constant
self.cur_class.process_constant(lex, d)
def validate(self):
if self.descr is None:
raise DocValidateError('module {} referenced but never defined'.format(self.name))
def dump(self, writer):
writer.module(self.name, self.descr, self.doc)
if self.functions:
writer.heading(2, 'Functions')
for f in sorted(self.functions.values(), key=lambda x:x.name):
f.dump(self.name, writer)
if self.constants:
writer.heading(2, 'Constants')
for c in sorted(self.constants.values(), key=lambda x:x.name):
c.dump(self.name, writer)
if self.classes:
writer.heading(2, 'Classes')
for c in sorted(self.classes.values(), key=lambda x:x.name):
writer.para('[`{}.{}`]({}) - {}'.format(self.name, c.name, c.name, c.descr))
def write_html(self, dir):
md_writer = MarkdownWriter()
md_writer.start()
self.dump(md_writer)
with open(os.path.join(dir, 'index.html'), 'wt') as f:
f.write(markdown.markdown(md_writer.end()))
for c in self.classes.values():
class_dir = os.path.join(dir, c.name)
makedirs(class_dir)
md_writer.start()
md_writer.para('part of the [{} module](./)'.format(self.name))
c.dump(md_writer)
with open(os.path.join(class_dir, 'index.html'), 'wt') as f:
f.write(markdown.markdown(md_writer.end()))
def write_rst(self, dir):
rst_writer = ReStructuredTextWriter()
rst_writer.start()
self.dump(rst_writer)
with open(dir + '/' + self.name + '.rst', 'wt') as f:
f.write(rst_writer.end())
for c in self.classes.values():
rst_writer.start()
c.dump(rst_writer)
with open(dir + '/' + self.name + '.' + c.name + '.rst', 'wt') as f:
f.write(rst_writer.end())
class Doc:
def __init__(self):
self.modules = {}
self.cur_module = None
def new_file(self):
self.cur_module = None
for m in self.modules.values():
m.new_file()
def check_module(self, lex):
if self.cur_module is None:
lex.error('module not defined')
def process_module(self, lex, d):
name = d['id']
if name not in self.modules:
self.modules[name] = DocModule(name, None)
self.cur_module = self.modules[name]
if self.cur_module.descr is not None:
lex.error("multiple definition of module '{}'".format(name))
self.cur_module.descr = d['descr']
self.cur_module.add_doc(lex)
def process_moduleref(self, lex, d):
name = d['id']
if name not in self.modules:
self.modules[name] = DocModule(name, None)
self.cur_module = self.modules[name]
lex.opt_break()
def process_class(self, lex, d):
self.check_module(lex)
self.cur_module.process_class(lex, d)
def process_function(self, lex, d):
self.check_module(lex)
self.cur_module.process_function(lex, d)
def process_classmethod(self, lex, d):
self.check_module(lex)
self.cur_module.process_classmethod(lex, d)
def process_method(self, lex, d):
self.check_module(lex)
self.cur_module.process_method(lex, d)
def process_constant(self, lex, d):
self.check_module(lex)
self.cur_module.process_constant(lex, d)
def validate(self):
for m in self.modules.values():
m.validate()
def dump(self, writer):
writer.heading(1, 'Modules')
writer.para('These are the Python modules that are implemented.')
for m in sorted(self.modules.values(), key=lambda x:x.name):
writer.para('[`{}`]({}/) - {}'.format(m.name, m.name, m.descr))
def write_html(self, dir):
md_writer = MarkdownWriter()
with open(os.path.join(dir, 'module', 'index.html'), 'wt') as f:
md_writer.start()
self.dump(md_writer)
f.write(markdown.markdown(md_writer.end()))
for m in self.modules.values():
mod_dir = os.path.join(dir, 'module', m.name)
makedirs(mod_dir)
m.write_html(mod_dir)
def write_rst(self, dir):
#with open(os.path.join(dir, 'module', 'index.html'), 'wt') as f:
# f.write(markdown.markdown(self.dump()))
for m in self.modules.values():
m.write_rst(dir)
regex_descr = r'(?P<descr>.*)'
doc_regexs = (
(Doc.process_module, re.compile(r'\\module (?P<id>[a-z][a-z0-9]*) - ' + regex_descr + r'$')),
(Doc.process_moduleref, re.compile(r'\\moduleref (?P<id>[a-z]+)$')),
(Doc.process_function, re.compile(r'\\function (?P<id>[a-z0-9_]+)(?P<args>\(.*\))$')),
(Doc.process_classmethod, re.compile(r'\\classmethod (?P<id>\\?[a-z0-9_]+)(?P<args>\(.*\))$')),
(Doc.process_method, re.compile(r'\\method (?P<id>\\?[a-z0-9_]+)(?P<args>\(.*\))$')),
(Doc.process_constant, re.compile(r'\\constant (?P<id>[A-Za-z0-9_]+) - ' + regex_descr + r'$')),
#(Doc.process_classref, re.compile(r'\\classref (?P<id>[A-Za-z0-9_]+)$')),
(Doc.process_class, re.compile(r'\\class (?P<id>[A-Za-z0-9_]+) - ' + regex_descr + r'$')),
)
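# Example of the documentation comments this tool parses (illustrative; the
# markers correspond to the regexes above, and the Lexer strips the '/// '):
#   /// \module pyb - functions related to the pyboard
#   ///
#   /// The ``pyb`` module contains functions related to the pyboard.
#   ///
#   /// \function delay(ms)
#   /// Delay for the given number of milliseconds.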
def process_file(file, doc):
lex = Lexer(file)
doc.new_file()
try:
try:
while True:
line = lex.next()
fun, match = re_match_first(doc_regexs, line)
if fun is None:
lex.error('unknown line format: {}'.format(line))
fun(doc, lex, match.groupdict())
except Lexer.Break:
lex.error('unexpected break')
except Lexer.EOF:
pass
except Lexer.LexerError:
return False
return True
def main():
cmd_parser = argparse.ArgumentParser(description='Generate documentation for pyboard API from C files.')
cmd_parser.add_argument('--outdir', metavar='<output dir>', default='gendoc-out', help='output directory')
cmd_parser.add_argument('--format', default='html', help='output format: html or rst')
cmd_parser.add_argument('files', nargs='+', help='input files')
args = cmd_parser.parse_args()
doc = Doc()
for file in args.files:
print('processing', file)
if not process_file(file, doc):
return
try:
doc.validate()
except DocValidateError as e:
print(e)
makedirs(args.outdir)
if args.format == 'html':
doc.write_html(args.outdir)
elif args.format == 'rst':
doc.write_rst(args.outdir)
else:
print('unknown format:', args.format)
return
print('written to', args.outdir)
if __name__ == "__main__":
main()
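# Example invocation (illustrative source file names):
#   python gendoc.py --format rst --outdir gendoc-out modpyb.c modled.c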
|
jayme-github/CouchPotatoServer | refs/heads/master | libs/sqlalchemy/dialects/maxdb/base.py | 15 | # maxdb/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MaxDB database.
This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.
This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.
Overview
--------
The ``maxdb`` dialect is **experimental** and has only been tested on 7.6.03.007
and 7.6.00.037. Of these, **only 7.6.03.007 will work** with SQLAlchemy's ORM.
The earlier version has severe ``LEFT JOIN`` limitations and will return
incorrect results from even very simple ORM queries.
Only the native Python DB-API is currently supported. ODBC driver support
is a future enhancement.
Connecting
----------
The username is case-sensitive. If you usually connect to the
database with sqlcli and other tools in lower case, you likely need to
use upper case for DB-API.
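A connection example (illustrative; the user, host and database names are
assumptions, with the username in upper case as noted above)::

    from sqlalchemy import create_engine
    engine = create_engine('maxdb://MYUSER:secret@localhost/MYDB')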
Implementation Notes
--------------------
With the 7.6.00.37 driver and Python 2.5, it seems that all DB-API
generated exceptions are broken and can cause Python to crash.
For 'somecol.in_([])' to work, the IN operator's generation must be changed
to cast 'NULL' to a numeric, i.e. NUM(NULL). The DB-API doesn't accept a
bind parameter there, so that particular generation must inline the NULL value,
which depends on [ticket:807].
The DB-API is very picky about where bind params may be used in queries.
Bind params for some functions (e.g. MOD) need type information supplied.
The dialect does not yet do this automatically.
Max will occasionally throw up 'bad sql, compile again' exceptions for
perfectly valid SQL. The dialect does not currently handle these; more
research is needed.
MaxDB 7.5 and SAP DB <= 7.4 reportedly do not support schemas. A very
slightly different version of this dialect would be required to support
those versions, and can easily be added if there is demand. Some other
required components, such as a Max-aware 'old oracle style' join compiler
(thetas with (+) outer indicators), are already done and available for
integration; email the devel list if you're interested in working on
this.
Versions tested: 7.6.03.07 and 7.6.00.37, native Python DB-API
* MaxDB has severe limitations on OUTER JOINs, which are essential to ORM
eager loading. And rather than raise an error if a SELECT can't be serviced,
the database simply returns incorrect results.
* Version 7.6.03.07 seems to JOIN properly, however the docs do not show the
OUTER restrictions being lifted (as of this writing), and no changelog is
available to confirm either. If you are using a different server version and
your tasks require the ORM or any semi-advanced SQL through the SQL layer,
running the SQLAlchemy test suite against your database is HIGHLY
recommended before you begin.
* Version 7.6.00.37 is LHS/RHS sensitive in `FROM lhs LEFT OUTER JOIN rhs ON
lhs.col=rhs.col` vs `rhs.col=lhs.col`!
* Version 7.6.00.37 is confused by `SELECT DISTINCT col as alias FROM t ORDER
BY col` - these aliased, DISTINCT, ordered queries need to be re-written to
order by the alias name.
* Version 7.6.x supports creating a SAVEPOINT but not its RELEASE.
* MaxDB supports autoincrement-style columns (DEFAULT SERIAL) and independent
sequences. When including a DEFAULT SERIAL column in an insert, 0 needs to
be inserted rather than NULL to generate a value.
* MaxDB supports ANSI and "old Oracle style" theta joins with (+) outer join
indicators.
* The SQLAlchemy dialect is schema-aware and probably won't function correctly
on schema-less server versions (pre-7.6?). Support for schema-less server
versions could be added if there's call.
* ORDER BY is not supported in subqueries. LIMIT is not supported in
subqueries. In 7.6.00.37, TOP does work in subqueries, but without OFFSET it
is of limited use. OFFSET does not work in 7.6 despite being in the docs. Row
number tricks in WHERE via ROWNO may be possible, but ROWNO only seems to
allow less-than comparison!
* Version 7.6.03.07 can't LIMIT if a derived table is in FROM: `SELECT * FROM
(SELECT * FROM a) LIMIT 2`
* MaxDB does not support SQL's CAST and can only usefully cast two types.
There isn't much implicit type conversion, so be precise when creating
`PassiveDefaults` in DDL generation: `'3'` and `3` aren't the same.
sapdb.dbapi
^^^^^^^^^^^
* As of 2007-10-22 the Python 2.4 and 2.5 compatible versions of the DB-API
are no longer available. A forum posting at SAP states that the Python
driver will be available again "in the future". The last release from MySQL
AB works if you can find it.
* sequence.NEXTVAL skips every other value!
* No rowcount for executemany()
* If an INSERT into a table with a DEFAULT SERIAL column inserts the results
of a function `INSERT INTO t VALUES (LENGTH('foo'))`, the cursor won't have
the serial id. It needs to be manually yanked from tablename.CURRVAL.
* Super-duper picky about where bind params can be placed. Not smart about
converting Python types for some functions, such as `MOD(5, ?)`.
* LONG (text, binary) values in result sets are read-once. The dialect uses a
caching RowProxy when these types are present.
* Connection objects seem like they want to be either `close()`d or garbage
collected, but not both. There's a warning issued but it seems harmless.
"""
import datetime, itertools, re
from sqlalchemy import exc, schema, sql, util, processors
from sqlalchemy.sql import operators as sql_operators, expression as sql_expr
from sqlalchemy.sql import compiler, visitors
from sqlalchemy.engine import base as engine_base, default, reflection
from sqlalchemy import types as sqltypes
class _StringType(sqltypes.String):
_type = None
def __init__(self, length=None, encoding=None, **kw):
super(_StringType, self).__init__(length=length, **kw)
self.encoding = encoding
def bind_processor(self, dialect):
if self.encoding == 'unicode':
return None
else:
def process(value):
if isinstance(value, unicode):
return value.encode(dialect.encoding)
else:
return value
return process
def result_processor(self, dialect, coltype):
#XXX: this code is probably very slow and one should try (if at all
# possible) to determine the correct code path on a per-connection
# basis (ie, here in result_processor, instead of inside the processor
# function itself) and probably also use a few generic
# processors, or possibly per query (though there is no mechanism
# for that yet).
def process(value):
while True:
if value is None:
return None
elif isinstance(value, unicode):
return value
elif isinstance(value, str):
if self.convert_unicode or dialect.convert_unicode:
return value.decode(dialect.encoding)
else:
return value
elif hasattr(value, 'read'):
# some sort of LONG, snarf and retry
value = value.read(value.remainingLength())
continue
else:
# unexpected type, return as-is
return value
return process
class MaxString(_StringType):
_type = 'VARCHAR'
class MaxUnicode(_StringType):
_type = 'VARCHAR'
def __init__(self, length=None, **kw):
kw['encoding'] = 'unicode'
super(MaxUnicode, self).__init__(length=length, **kw)
class MaxChar(_StringType):
_type = 'CHAR'
class MaxText(_StringType):
_type = 'LONG'
def __init__(self, length=None, **kw):
super(MaxText, self).__init__(length, **kw)
def get_col_spec(self):
spec = 'LONG'
if self.encoding is not None:
spec = ' '.join((spec, self.encoding))
elif self.convert_unicode:
spec = ' '.join((spec, 'UNICODE'))
return spec
class MaxNumeric(sqltypes.Numeric):
"""The FIXED (also NUMERIC, DECIMAL) data type."""
def __init__(self, precision=None, scale=None, **kw):
kw.setdefault('asdecimal', True)
super(MaxNumeric, self).__init__(scale=scale, precision=precision,
**kw)
def bind_processor(self, dialect):
return None
class MaxTimestamp(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, basestring):
return value
elif dialect.datetimeformat == 'internal':
ms = getattr(value, 'microsecond', 0)
return value.strftime("%Y%m%d%H%M%S" + ("%06u" % ms))
elif dialect.datetimeformat == 'iso':
ms = getattr(value, 'microsecond', 0)
return value.strftime("%Y-%m-%d %H:%M:%S." + ("%06u" % ms))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." % (
dialect.datetimeformat,))
return process
def result_processor(self, dialect, coltype):
if dialect.datetimeformat == 'internal':
def process(value):
if value is None:
return None
else:
return datetime.datetime(
*[int(v)
for v in (value[0:4], value[4:6], value[6:8],
value[8:10], value[10:12], value[12:14],
value[14:])])
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.datetime(
*[int(v)
for v in (value[0:4], value[5:7], value[8:10],
value[11:13], value[14:16], value[17:19],
value[20:])])
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
class MaxDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, basestring):
return value
elif dialect.datetimeformat == 'internal':
return value.strftime("%Y%m%d")
elif dialect.datetimeformat == 'iso':
return value.strftime("%Y-%m-%d")
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." % (
dialect.datetimeformat,))
return process
def result_processor(self, dialect, coltype):
if dialect.datetimeformat == 'internal':
def process(value):
if value is None:
return None
else:
return datetime.date(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.date(int(value[0:4]), int(value[5:7]),
int(value[8:10]))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
class MaxTime(sqltypes.Time):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, basestring):
return value
elif dialect.datetimeformat == 'internal':
return value.strftime("%H%M%S")
elif dialect.datetimeformat == 'iso':
return value.strftime("%H-%M-%S")
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." % (
dialect.datetimeformat,))
return process
def result_processor(self, dialect, coltype):
if dialect.datetimeformat == 'internal':
def process(value):
if value is None:
return None
else:
return datetime.time(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.time(int(value[0:4]), int(value[5:7]),
int(value[8:10]))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
class MaxBlob(sqltypes.LargeBinary):
def bind_processor(self, dialect):
return processors.to_str
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.read(value.remainingLength())
return process
class MaxDBTypeCompiler(compiler.GenericTypeCompiler):
def _string_spec(self, string_spec, type_):
if type_.length is None:
spec = 'LONG'
else:
spec = '%s(%s)' % (string_spec, type_.length)
if getattr(type_, 'encoding'):
spec = ' '.join([spec, getattr(type_, 'encoding').upper()])
return spec
def visit_text(self, type_):
spec = 'LONG'
if getattr(type_, 'encoding', None):
spec = ' '.join((spec, type_.encoding))
elif type_.convert_unicode:
spec = ' '.join((spec, 'UNICODE'))
return spec
def visit_char(self, type_):
return self._string_spec("CHAR", type_)
def visit_string(self, type_):
return self._string_spec("VARCHAR", type_)
def visit_large_binary(self, type_):
return "LONG BYTE"
def visit_numeric(self, type_):
if type_.scale and type_.precision:
return 'FIXED(%s, %s)' % (type_.precision, type_.scale)
elif type_.precision:
return 'FIXED(%s)' % type_.precision
else:
return 'INTEGER'
def visit_BOOLEAN(self, type_):
return "BOOLEAN"
colspecs = {
sqltypes.Numeric: MaxNumeric,
sqltypes.DateTime: MaxTimestamp,
sqltypes.Date: MaxDate,
sqltypes.Time: MaxTime,
sqltypes.String: MaxString,
sqltypes.Unicode: MaxUnicode,
sqltypes.LargeBinary: MaxBlob,
sqltypes.Text: MaxText,
sqltypes.CHAR: MaxChar,
sqltypes.TIMESTAMP: MaxTimestamp,
sqltypes.BLOB: MaxBlob,
}
ischema_names = {
'boolean': sqltypes.BOOLEAN,
'char': sqltypes.CHAR,
'character': sqltypes.CHAR,
'date': sqltypes.DATE,
'fixed': sqltypes.Numeric,
'float': sqltypes.FLOAT,
'int': sqltypes.INT,
'integer': sqltypes.INT,
'long binary': sqltypes.BLOB,
'long unicode': sqltypes.Text,
'long': sqltypes.Text,
'smallint': sqltypes.SmallInteger,
'time': sqltypes.Time,
'timestamp': sqltypes.TIMESTAMP,
'varchar': sqltypes.VARCHAR,
}
# TODO: migrate this to sapdb.py
class MaxDBExecutionContext(default.DefaultExecutionContext):
def post_exec(self):
# DB-API bug: if there were any functions as values,
# then do another select and pull CURRVAL from the
# autoincrement column's implicit sequence... ugh
if self.compiled.isinsert and not self.executemany:
table = self.compiled.statement.table
index, serial_col = _autoserial_column(table)
if serial_col and (not self.compiled._safeserial or
not(self._last_inserted_ids) or
self._last_inserted_ids[index] in (None, 0)):
if table.schema:
sql = "SELECT %s.CURRVAL FROM DUAL" % (
self.compiled.preparer.format_table(table))
else:
sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % (
self.compiled.preparer.format_table(table))
rs = self.cursor.execute(sql)
id = rs.fetchone()[0]
if not self._last_inserted_ids:
# This shouldn't ever be > 1? Right?
self._last_inserted_ids = \
[None] * len(table.primary_key.columns)
self._last_inserted_ids[index] = id
super(MaxDBExecutionContext, self).post_exec()
def get_result_proxy(self):
if self.cursor.description is not None:
for column in self.cursor.description:
if column[1] in ('Long Binary', 'Long', 'Long Unicode'):
return MaxDBResultProxy(self)
return engine_base.ResultProxy(self)
@property
def rowcount(self):
if hasattr(self, '_rowcount'):
return self._rowcount
else:
return self.cursor.rowcount
def fire_sequence(self, seq):
if seq.optional:
return None
return self._execute_scalar("SELECT %s.NEXTVAL FROM DUAL" % (
self.dialect.identifier_preparer.format_sequence(seq)))
class MaxDBCachedColumnRow(engine_base.RowProxy):
"""A RowProxy that only runs result_processors once per column."""
def __init__(self, parent, row):
super(MaxDBCachedColumnRow, self).__init__(parent, row)
self.columns = {}
self._row = row
self._parent = parent
def _get_col(self, key):
if key not in self.columns:
self.columns[key] = self._parent._get_col(self._row, key)
return self.columns[key]
def __iter__(self):
for i in xrange(len(self._row)):
yield self._get_col(i)
def __repr__(self):
return repr(list(self))
def __eq__(self, other):
return ((other is self) or
(other == tuple([self._get_col(key)
for key in xrange(len(self._row))])))
def __getitem__(self, key):
if isinstance(key, slice):
indices = key.indices(len(self._row))
return tuple([self._get_col(i) for i in xrange(*indices)])
else:
return self._get_col(key)
def __getattr__(self, name):
try:
return self._get_col(name)
except KeyError:
raise AttributeError(name)
class MaxDBResultProxy(engine_base.ResultProxy):
_process_row = MaxDBCachedColumnRow
class MaxDBCompiler(compiler.SQLCompiler):
function_conversion = {
'CURRENT_DATE': 'DATE',
'CURRENT_TIME': 'TIME',
'CURRENT_TIMESTAMP': 'TIMESTAMP',
}
# These functions must be written without parens when called with no
# parameters. e.g. 'SELECT DATE FROM DUAL' not 'SELECT DATE() FROM DUAL'
bare_functions = set([
'CURRENT_SCHEMA', 'DATE', 'FALSE', 'SYSDBA', 'TIME', 'TIMESTAMP',
'TIMEZONE', 'TRANSACTION', 'TRUE', 'USER', 'UID', 'USERGROUP',
'UTCDATE', 'UTCDIFF'])
def visit_mod(self, binary, **kw):
return "mod(%s, %s)" % \
(self.process(binary.left), self.process(binary.right))
def default_from(self):
return ' FROM DUAL'
def for_update_clause(self, select):
clause = select.for_update
if clause is True:
return " WITH LOCK EXCLUSIVE"
elif clause is None:
return ""
elif clause == "read":
return " WITH LOCK"
elif clause == "ignore":
return " WITH LOCK (IGNORE) EXCLUSIVE"
elif clause == "nowait":
return " WITH LOCK (NOWAIT) EXCLUSIVE"
elif isinstance(clause, basestring):
return " WITH LOCK %s" % clause.upper()
elif not clause:
return ""
else:
return " WITH LOCK EXCLUSIVE"
def function_argspec(self, fn, **kw):
if fn.name.upper() in self.bare_functions:
return ""
elif len(fn.clauses) > 0:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def visit_function(self, fn, **kw):
transform = self.function_conversion.get(fn.name.upper(), None)
if transform:
fn = fn._clone()
fn.name = transform
return super(MaxDBCompiler, self).visit_function(fn, **kw)
def visit_cast(self, cast, **kwargs):
# MaxDB only supports casts * to NUMERIC, * to VARCHAR or
# date/time to VARCHAR. Casts of LONGs will fail.
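# For example (illustrative): cast(col, Integer) is emitted as NUM(col),
# cast(col, String) as CHR(col), and any other cast falls back to the inner
# expression unchanged.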
if isinstance(cast.type, (sqltypes.Integer, sqltypes.Numeric)):
return "NUM(%s)" % self.process(cast.clause)
elif isinstance(cast.type, sqltypes.String):
return "CHR(%s)" % self.process(cast.clause)
else:
return self.process(cast.clause)
def visit_sequence(self, sequence):
if sequence.optional:
return None
else:
return (
self.dialect.identifier_preparer.format_sequence(sequence) +
".NEXTVAL")
class ColumnSnagger(visitors.ClauseVisitor):
def __init__(self):
self.count = 0
self.column = None
def visit_column(self, column):
self.column = column
self.count += 1
def _find_labeled_columns(self, columns, use_labels=False):
labels = {}
for column in columns:
if isinstance(column, basestring):
continue
snagger = self.ColumnSnagger()
snagger.traverse(column)
if snagger.count == 1:
if isinstance(column, sql_expr._Label):
labels[unicode(snagger.column)] = column.name
elif use_labels:
labels[unicode(snagger.column)] = column._label
return labels
def order_by_clause(self, select, **kw):
order_by = self.process(select._order_by_clause, **kw)
# ORDER BY clauses in DISTINCT queries must reference aliased
# inner columns by alias name, not true column name.
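# e.g. (illustrative) 'SELECT DISTINCT col AS alias FROM t ORDER BY col' must
# be rewritten to end with 'ORDER BY alias'; the substitution below does that.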
if order_by and getattr(select, '_distinct', False):
labels = self._find_labeled_columns(select.inner_columns,
select.use_labels)
if labels:
for needs_alias in labels.keys():
r = re.compile(r'(^| )(%s)(,| |$)' %
re.escape(needs_alias))
order_by = r.sub((r'\1%s\3' % labels[needs_alias]),
order_by)
# No ORDER BY in subqueries.
if order_by:
if self.is_subquery():
# It's safe to simply drop the ORDER BY if there is no
# LIMIT. Right? Other dialects seem to get away with
# dropping order.
if select._limit:
raise exc.CompileError(
"MaxDB does not support ORDER BY in subqueries")
else:
return ""
return " ORDER BY " + order_by
else:
return ""
def get_select_precolumns(self, select):
# Convert a subquery's LIMIT to TOP
sql = select._distinct and 'DISTINCT ' or ''
if self.is_subquery() and select._limit:
if select._offset:
raise exc.InvalidRequestError(
'MaxDB does not support LIMIT with an offset.')
sql += 'TOP %s ' % select._limit
return sql
def limit_clause(self, select):
# The docs say offsets are supported with LIMIT. But they're not.
# TODO: maybe emulate by adding a ROWNO/ROWNUM predicate?
# TODO: does MaxDB support bind params for LIMIT / TOP ?
if self.is_subquery():
# sub queries need TOP
return ''
elif select._offset:
raise exc.InvalidRequestError(
'MaxDB does not support LIMIT with an offset.')
else:
return ' \n LIMIT %s' % (select._limit,)
def visit_insert(self, insert):
self.isinsert = True
self._safeserial = True
colparams = self._get_colparams(insert)
for value in (insert.parameters or {}).itervalues():
if isinstance(value, sql_expr.Function):
self._safeserial = False
break
return ''.join(('INSERT INTO ',
self.preparer.format_table(insert.table),
' (',
', '.join([self.preparer.format_column(c[0])
for c in colparams]),
') VALUES (',
', '.join([c[1] for c in colparams]),
')'))
class MaxDBIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([
'abs', 'absolute', 'acos', 'adddate', 'addtime', 'all', 'alpha',
'alter', 'any', 'ascii', 'asin', 'atan', 'atan2', 'avg', 'binary',
'bit', 'boolean', 'byte', 'case', 'ceil', 'ceiling', 'char',
'character', 'check', 'chr', 'column', 'concat', 'constraint', 'cos',
'cosh', 'cot', 'count', 'cross', 'curdate', 'current', 'curtime',
'database', 'date', 'datediff', 'day', 'dayname', 'dayofmonth',
'dayofweek', 'dayofyear', 'dec', 'decimal', 'decode', 'default',
'degrees', 'delete', 'digits', 'distinct', 'double', 'except',
'exists', 'exp', 'expand', 'first', 'fixed', 'float', 'floor', 'for',
'from', 'full', 'get_objectname', 'get_schema', 'graphic', 'greatest',
'group', 'having', 'hex', 'hextoraw', 'hour', 'ifnull', 'ignore',
'index', 'initcap', 'inner', 'insert', 'int', 'integer', 'internal',
'intersect', 'into', 'join', 'key', 'last', 'lcase', 'least', 'left',
'length', 'lfill', 'list', 'ln', 'locate', 'log', 'log10', 'long',
'longfile', 'lower', 'lpad', 'ltrim', 'makedate', 'maketime',
'mapchar', 'max', 'mbcs', 'microsecond', 'min', 'minute', 'mod',
'month', 'monthname', 'natural', 'nchar', 'next', 'no', 'noround',
'not', 'now', 'null', 'num', 'numeric', 'object', 'of', 'on',
'order', 'packed', 'pi', 'power', 'prev', 'primary', 'radians',
'real', 'reject', 'relative', 'replace', 'rfill', 'right', 'round',
'rowid', 'rowno', 'rpad', 'rtrim', 'second', 'select', 'selupd',
'serial', 'set', 'show', 'sign', 'sin', 'sinh', 'smallint', 'some',
'soundex', 'space', 'sqrt', 'stamp', 'statistics', 'stddev',
'subdate', 'substr', 'substring', 'subtime', 'sum', 'sysdba',
'table', 'tan', 'tanh', 'time', 'timediff', 'timestamp', 'timezone',
'to', 'toidentifier', 'transaction', 'translate', 'trim', 'trunc',
'truncate', 'ucase', 'uid', 'unicode', 'union', 'update', 'upper',
'user', 'usergroup', 'using', 'utcdate', 'utcdiff', 'value', 'values',
'varchar', 'vargraphic', 'variance', 'week', 'weekofyear', 'when',
'where', 'with', 'year', 'zoned' ])
def _normalize_name(self, name):
if name is None:
return None
if name.isupper():
lc_name = name.lower()
if not self._requires_quotes(lc_name):
return lc_name
return name
def _denormalize_name(self, name):
if name is None:
return None
elif (name.islower() and
not self._requires_quotes(name)):
return name.upper()
else:
return name
def _maybe_quote_identifier(self, name):
if self._requires_quotes(name):
return self.quote_identifier(name)
else:
return name
class MaxDBDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
colspec = [self.preparer.format_column(column),
self.dialect.type_compiler.process(column.type)]
if not column.nullable:
colspec.append('NOT NULL')
default = column.default
default_str = self.get_column_default_string(column)
# No DDL default for columns specified with non-optional sequence-
# this defaulting behavior is entirely client-side. (And as a
# consequence, non-reflectable.)
if (default and isinstance(default, schema.Sequence) and
not default.optional):
pass
# Regular default
elif default_str is not None:
colspec.append('DEFAULT %s' % default_str)
# Assign DEFAULT SERIAL heuristically
elif column.primary_key and column.autoincrement:
# For SERIAL on a non-primary key member, use
# DefaultClause(text('SERIAL'))
try:
first = [c for c in column.table.primary_key.columns
if (c.autoincrement and
(isinstance(c.type, sqltypes.Integer) or
(isinstance(c.type, MaxNumeric) and
c.type.precision)) and
not c.foreign_keys)].pop(0)
if column is first:
colspec.append('DEFAULT SERIAL')
except IndexError:
pass
return ' '.join(colspec)
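# Illustrative result (assuming defaults): a first autoincrementing Integer
# primary key column named 'id' compiles to 'id INTEGER NOT NULL DEFAULT SERIAL'.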
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.default.arg, basestring):
if isinstance(column.type, sqltypes.Integer):
return str(column.default.arg)
else:
return "'%s'" % column.default.arg
else:
return unicode(self._compile(column.default.arg, None))
else:
return None
def visit_create_sequence(self, create):
"""Creates a SEQUENCE.
TODO: move to module doc?
start
With an integer value, set the START WITH option.
increment
An integer value to increment by. Default is the database default.
maxdb_minvalue
maxdb_maxvalue
With an integer value, sets the corresponding sequence option.
maxdb_no_minvalue
maxdb_no_maxvalue
Defaults to False. If true, sets the corresponding sequence option.
maxdb_cycle
Defaults to False. If true, sets the CYCLE option.
maxdb_cache
With an integer value, sets the CACHE option.
maxdb_no_cache
Defaults to False. If true, sets NOCACHE.
"""
sequence = create.element
if (not sequence.optional and
(not self.checkfirst or
not self.dialect.has_sequence(self.connection, sequence.name))):
ddl = ['CREATE SEQUENCE',
self.preparer.format_sequence(sequence)]
sequence.increment = 1
if sequence.increment is not None:
ddl.extend(('INCREMENT BY', str(sequence.increment)))
if sequence.start is not None:
ddl.extend(('START WITH', str(sequence.start)))
opts = dict([(pair[0][6:].lower(), pair[1])
for pair in sequence.kwargs.items()
if pair[0].startswith('maxdb_')])
if 'maxvalue' in opts:
ddl.extend(('MAXVALUE', str(opts['maxvalue'])))
elif opts.get('no_maxvalue', False):
ddl.append('NOMAXVALUE')
if 'minvalue' in opts:
ddl.extend(('MINVALUE', str(opts['minvalue'])))
elif opts.get('no_minvalue', False):
ddl.append('NOMINVALUE')
if opts.get('cycle', False):
ddl.append('CYCLE')
if 'cache' in opts:
ddl.extend(('CACHE', str(opts['cache'])))
elif opts.get('no_cache', False):
ddl.append('NOCACHE')
return ' '.join(ddl)
class MaxDBDialect(default.DefaultDialect):
name = 'maxdb'
supports_alter = True
supports_unicode_statements = True
max_identifier_length = 32
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
preparer = MaxDBIdentifierPreparer
statement_compiler = MaxDBCompiler
ddl_compiler = MaxDBDDLCompiler
execution_ctx_cls = MaxDBExecutionContext
ported_sqla_06 = False
colspecs = colspecs
ischema_names = ischema_names
# MaxDB-specific
datetimeformat = 'internal'
def __init__(self, _raise_known_sql_errors=False, **kw):
super(MaxDBDialect, self).__init__(**kw)
self._raise_known = _raise_known_sql_errors
if self.dbapi is None:
self.dbapi_type_map = {}
else:
self.dbapi_type_map = {
'Long Binary': MaxBlob(),
'Long byte_t': MaxBlob(),
'Long Unicode': MaxText(),
'Timestamp': MaxTimestamp(),
'Date': MaxDate(),
'Time': MaxTime(),
datetime.datetime: MaxTimestamp(),
datetime.date: MaxDate(),
datetime.time: MaxTime(),
}
def do_execute(self, cursor, statement, parameters, context=None):
res = cursor.execute(statement, parameters)
if isinstance(res, int) and context is not None:
context._rowcount = res
def do_release_savepoint(self, connection, name):
# Does MaxDB truly support RELEASE SAVEPOINT <id>? All my attempts
# produce "SUBTRANS COMMIT/ROLLBACK not allowed without SUBTRANS
# BEGIN SQLSTATE: I7065"
# Note that ROLLBACK TO works fine. In theory, a RELEASE should
# just free up some transactional resources early, before the overall
# COMMIT/ROLLBACK so omitting it should be relatively ok.
pass
def _get_default_schema_name(self, connection):
return self.identifier_preparer._normalize_name(
connection.execute(
'SELECT CURRENT_SCHEMA FROM DUAL').scalar())
def has_table(self, connection, table_name, schema=None):
denormalize = self.identifier_preparer._denormalize_name
bind = [denormalize(table_name)]
if schema is None:
sql = ("SELECT tablename FROM TABLES "
"WHERE TABLES.TABLENAME=? AND"
" TABLES.SCHEMANAME=CURRENT_SCHEMA ")
else:
sql = ("SELECT tablename FROM TABLES "
"WHERE TABLES.TABLENAME = ? AND"
" TABLES.SCHEMANAME=? ")
bind.append(denormalize(schema))
rp = connection.execute(sql, bind)
return bool(rp.first())
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
sql = (" SELECT TABLENAME FROM TABLES WHERE "
" SCHEMANAME=CURRENT_SCHEMA ")
rs = connection.execute(sql)
else:
sql = (" SELECT TABLENAME FROM TABLES WHERE "
" SCHEMANAME=? ")
matchname = self.identifier_preparer._denormalize_name(schema)
rs = connection.execute(sql, matchname)
normalize = self.identifier_preparer._normalize_name
return [normalize(row[0]) for row in rs]
def reflecttable(self, connection, table, include_columns):
denormalize = self.identifier_preparer._denormalize_name
normalize = self.identifier_preparer._normalize_name
st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
' NULLABLE, "DEFAULT", DEFAULTFUNCTION '
'FROM COLUMNS '
'WHERE TABLENAME=? AND SCHEMANAME=%s '
'ORDER BY POS')
fk = ('SELECT COLUMNNAME, FKEYNAME, '
' REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
' (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
' THEN 1 ELSE 0 END) AS in_schema '
'FROM FOREIGNKEYCOLUMNS '
'WHERE TABLENAME=? AND SCHEMANAME=%s '
'ORDER BY FKEYNAME ')
params = [denormalize(table.name)]
if not table.schema:
st = st % 'CURRENT_SCHEMA'
fk = fk % 'CURRENT_SCHEMA'
else:
st = st % '?'
fk = fk % '?'
params.append(denormalize(table.schema))
rows = connection.execute(st, params).fetchall()
if not rows:
raise exc.NoSuchTableError(table.fullname)
include_columns = set(include_columns or [])
for row in rows:
(name, mode, col_type, encoding, length, scale,
nullable, constant_def, func_def) = row
name = normalize(name)
if include_columns and name not in include_columns:
continue
type_args, type_kw = [], {}
if col_type == 'FIXED':
type_args = length, scale
# Convert FIXED(10) DEFAULT SERIAL to our Integer
if (scale == 0 and
func_def is not None and func_def.startswith('SERIAL')):
col_type = 'INTEGER'
type_args = length,
elif col_type == 'FLOAT':
type_args = length,
elif col_type in ('CHAR', 'VARCHAR'):
type_args = length,
type_kw['encoding'] = encoding
elif col_type == 'LONG':
type_kw['encoding'] = encoding
try:
type_cls = ischema_names[col_type.lower()]
type_instance = type_cls(*type_args, **type_kw)
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(col_type, name))
type_instance = sqltypes.NullType
col_kw = {'autoincrement': False}
col_kw['nullable'] = (nullable == 'YES')
col_kw['primary_key'] = (mode == 'KEY')
if func_def is not None:
if func_def.startswith('SERIAL'):
if col_kw['primary_key']:
# No special default- let the standard autoincrement
# support handle SERIAL pk columns.
col_kw['autoincrement'] = True
else:
# strip current numbering
col_kw['server_default'] = schema.DefaultClause(
sql.text('SERIAL'))
col_kw['autoincrement'] = True
else:
col_kw['server_default'] = schema.DefaultClause(
sql.text(func_def))
elif constant_def is not None:
col_kw['server_default'] = schema.DefaultClause(sql.text(
"'%s'" % constant_def.replace("'", "''")))
table.append_column(schema.Column(name, type_instance, **col_kw))
fk_sets = itertools.groupby(connection.execute(fk, params),
lambda row: row.FKEYNAME)
for fkeyname, fkey in fk_sets:
fkey = list(fkey)
if include_columns:
key_cols = set([r.COLUMNNAME for r in fkey])
if key_cols != include_columns:
continue
columns, referants = [], []
quote = self.identifier_preparer._maybe_quote_identifier
for row in fkey:
columns.append(normalize(row.COLUMNNAME))
if table.schema or not row.in_schema:
referants.append('.'.join(
[quote(normalize(row[c]))
for c in ('REFSCHEMANAME', 'REFTABLENAME',
'REFCOLUMNNAME')]))
else:
referants.append('.'.join(
[quote(normalize(row[c]))
for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))
constraint_kw = {'name': fkeyname.lower()}
if fkey[0].RULE is not None:
rule = fkey[0].RULE
if rule.startswith('DELETE '):
rule = rule[7:]
constraint_kw['ondelete'] = rule
table_kw = {}
if table.schema or not row.in_schema:
table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)
ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
table_kw.get('schema'))
if ref_key not in table.metadata.tables:
schema.Table(normalize(fkey[0].REFTABLENAME),
table.metadata,
autoload=True, autoload_with=connection,
**table_kw)
constraint = schema.ForeignKeyConstraint(
columns, referants, link_to_name=True,
**constraint_kw)
table.append_constraint(constraint)
def has_sequence(self, connection, name):
# [ticket:726] makes this schema-aware.
denormalize = self.identifier_preparer._denormalize_name
sql = ("SELECT sequence_name FROM SEQUENCES "
"WHERE SEQUENCE_NAME=? ")
rp = connection.execute(sql, denormalize(name))
return bool(rp.first())
def _autoserial_column(table):
"""Finds the effective DEFAULT SERIAL column of a Table, if any."""
for index, col in enumerate(table.primary_key.columns):
if (isinstance(col.type, (sqltypes.Integer, sqltypes.Numeric)) and
col.autoincrement):
if isinstance(col.default, schema.Sequence):
if col.default.optional:
return index, col
elif (col.default is None or
(not isinstance(col.server_default, schema.DefaultClause))):
return index, col
return None, None
|
cappatar/knesset-data-pipelines | refs/heads/master | datapackage_pipelines_knesset/committees/processors/parse_committee_meeting_protocols.py | 4 | from datapackage_pipelines_knesset.common.processors.base_processor import BaseProcessor
from knesset_data.protocols.committee import CommitteeMeetingProtocol
from knesset_data.protocols.exceptions import AntiwordException
import os, csv, json, subprocess, logging, shutil, tempfile
from datapackage_pipelines_knesset.common import object_storage
import xml.etree.ElementTree
class ParseCommitteeMeetingProtocolsProcessor(BaseProcessor):
def __init__(self, *args, **kwargs):
super(ParseCommitteeMeetingProtocolsProcessor, self).__init__(*args, **kwargs)
self._schema["fields"] = [
{"name": "kns_committee_id", "type": "integer", "description": "primary key from kns_committee table"},
{"name": "kns_session_id", "type": "integer", "description": "primary key from kns_committeesession table"},
{"name": "protocol_object_name", "type": "string", "description": "storage object name containing the downloaded protocol"},
{"name": "protocol_extension", "type": "string", "description": "file extension of the downloaded protocol"},
{"name": "text_object_name", "type": "string", "description": "storage object name containing the parsed protocol text"},
{"name": "parts_object_name", "type": "string", "description": "storage object name containing the parsed protocol csv"},]
self._schema["primaryKey"] = ["kns_session_id"]
self.s3 = object_storage.get_s3()
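# Illustrative output row (the ids are assumptions): committee 21, session 987
# with a .doc protocol yields text_object_name "protocols/parsed/21/987.txt"
# and parts_object_name "protocols/parsed/21/987.csv" in the "committees" bucket.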
def _process(self, datapackage, resources):
return self._process_filter(datapackage, resources)
def _filter_row(self, meeting_protocol, **kwargs):
bucket = "committees"
protocol_object_name = meeting_protocol["protocol_object_name"]
protocol_extension = meeting_protocol["protocol_extension"]
base_object_name = "protocols/parsed/{}/{}".format(meeting_protocol["kns_committee_id"], meeting_protocol["kns_session_id"])
parts_object_name = "{}.csv".format(base_object_name)
text_object_name = "{}.txt".format(base_object_name)
if not object_storage.exists(self.s3, bucket, parts_object_name, min_size=5):
parse_args = (meeting_protocol["kns_committee_id"], meeting_protocol["kns_session_id"],
bucket, protocol_object_name, parts_object_name, text_object_name)
if protocol_extension == "doc":
parse_res = self._parse_doc_protocol(*parse_args)
elif protocol_extension == "rtf":
parse_res = self._parse_rtf_protocol(*parse_args)
elif protocol_extension == "docx":
parse_res = None
else:
raise Exception("unknown extension: {}".format(protocol_extension))
if not parse_res:
# in case parsing failed - we remove all parsed files, to ensure re-parse next time
object_storage.delete(self.s3, bucket, text_object_name)
object_storage.delete(self.s3, bucket, parts_object_name)
text_object_name = None
parts_object_name = None
yield {"kns_committee_id": meeting_protocol["kns_committee_id"],
"kns_session_id": meeting_protocol["kns_session_id"],
"protocol_object_name": protocol_object_name,
"protocol_extension": protocol_extension,
"text_object_name": text_object_name,
"parts_object_name": parts_object_name}
def _parse_rtf_protocol(self, committee_id, meeting_id, bucket, protocol_object_name, parts_object_name, text_object_name):
# currently with the new API - we don't seem to get rtf files anymore
# it looks like files which used to be rtf are actually doc
# need to investigate further
return False
# rtf_extractor = os.environ.get("RTF_EXTRACTOR_BIN")
# if rtf_extractor:
# with object_storage.temp_download(protocol_object_name) as protocol_filename:
# with tempfile.NamedTemporaryFile() as text_filename:
# cmd = rtf_extractor + ' ' + protocol_filename + ' ' + text_filename
# try:
# subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
# protocol_text = fs.read(text_filename)
# with CommitteeMeetingProtocol.get_from_text(protocol_text) as protocol:
# self._parse_protocol_parts(parts_filename, protocol)
# except subprocess.SubprocessError:
# logging.exception("committee {} meeting {}: failed to parse rtf file, skipping".format(committee_id,
# meeting_id))
# return False
# return True
# else:
# logging.warning("missing RTF_EXTRACTOR_BIN environment variable, skipping rtf parsing")
# return False
def _parse_doc_protocol(self, committee_id, meeting_id, bucket, protocol_object_name, parts_object_name, text_object_name):
logging.info("parsing doc protocol {} --> {}, {}".format(protocol_object_name, parts_object_name, text_object_name))
with object_storage.temp_download(self.s3, bucket, protocol_object_name) as protocol_filename:
try:
with CommitteeMeetingProtocol.get_from_filename(protocol_filename) as protocol:
object_storage.write(self.s3, bucket, text_object_name, protocol.text, public_bucket=True)
self._parse_protocol_parts(bucket, parts_object_name, protocol)
except (
AntiwordException, # see https://github.com/hasadna/knesset-data-pipelines/issues/15
subprocess.SubprocessError,
xml.etree.ElementTree.ParseError # see https://github.com/hasadna/knesset-data-pipelines/issues/32
):
logging.exception("committee {} meeting {}: failed to parse doc file, skipping".format(committee_id, meeting_id))
return False
return True
def _parse_protocol_parts(self, bucket, parts_object_name, protocol):
with object_storage.csv_writer(self.s3, bucket, parts_object_name, public_bucket=True) as csv_writer:
csv_writer.writerow(["header", "body"])
for part in protocol.parts:
csv_writer.writerow([part.header, part.body])
logging.info("parsed parts file -> {}".format(parts_object_name))
if __name__ == '__main__':
ParseCommitteeMeetingProtocolsProcessor.main()
|
fangxingli/hue | refs/heads/master | desktop/core/ext-py/markdown/markdown/extensions/legacy.py | 49 | """
Legacy Extension for Python-Markdown
====================================
Replaces the core parser with the old one.
"""
import markdown, re
from markdown import etree
"""Basic and reusable regular expressions."""
def wrapRe(raw_re) : return re.compile("^%s$" % raw_re, re.DOTALL)
CORE_RE = {
'header': wrapRe(r'(#{1,6})[ \t]*(.*?)[ \t]*(#*)'), # # A title
'reference-def': wrapRe(r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)'),
# [Google]: http://www.google.com/
'containsline': wrapRe(r'([-]*)$|^([=]*)'), # -----, =====, etc.
'ol': wrapRe(r'[ ]{0,3}[\d]*\.\s+(.*)'), # 1. text
'ul': wrapRe(r'[ ]{0,3}[*+-]\s+(.*)'), # "* text"
'isline1': wrapRe(r'(\**)'), # ***
'isline2': wrapRe(r'(\-*)'), # ---
'isline3': wrapRe(r'(\_*)'), # ___
'tabbed': wrapRe(r'((\t)|( ))(.*)'), # an indented line
'quoted': wrapRe(r'[ ]{0,2}> ?(.*)'), # a quoted block ("> ...")
'containsline': re.compile(r'^([-]*)$|^([=]*)$', re.M),
'attr': re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
}
class MarkdownParser:
"""Parser Markdown into a ElementTree."""
def __init__(self):
pass
def parseDocument(self, lines):
"""Parse a markdown string into an ElementTree."""
# Create a ElementTree from the lines
root = etree.Element("div")
buffer = []
for line in lines:
if line.startswith("#"):
self.parseChunk(root, buffer)
buffer = [line]
else:
buffer.append(line)
self.parseChunk(root, buffer)
return etree.ElementTree(root)
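    # Illustrative sketch of the chunking done by parseDocument (input hypothetical):
    # the lines ["intro", "# Title", "body"] reach parseChunk as two chunks,
    # ["intro"] and ["# Title", "body"], both attached to the same <div> root.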
def parseChunk(self, parent_elem, lines, inList=0, looseList=0):
"""Process a chunk of markdown-formatted text and attach the parse to
an ElementTree node.
Process a section of a source document, looking for high
level structural elements like lists, block quotes, code
        segments, html blocks, etc. Some of those then get stripped
of their high level markup (e.g. get unindented) and the
lower-level markup is processed recursively.
Keyword arguments:
* parent_elem: The ElementTree element to which the content will be
added.
* lines: a list of lines
* inList: a level
Returns: None
"""
# Loop through lines until none left.
while lines:
# Skipping empty line
if not lines[0]:
lines = lines[1:]
continue
# Check if this section starts with a list, a blockquote or
# a code block. If so, process them.
processFn = { 'ul': self.__processUList,
'ol': self.__processOList,
'quoted': self.__processQuote,
'tabbed': self.__processCodeBlock}
for regexp in ['ul', 'ol', 'quoted', 'tabbed']:
m = CORE_RE[regexp].match(lines[0])
if m:
processFn[regexp](parent_elem, lines, inList)
return
# We are NOT looking at one of the high-level structures like
# lists or blockquotes. So, it's just a regular paragraph
# (though perhaps nested inside a list or something else). If
# we are NOT inside a list, we just need to look for a blank
# line to find the end of the block. If we ARE inside a
# list, however, we need to consider that a sublist does not
# need to be separated by a blank line. Rather, the following
# markup is legal:
#
# * The top level list item
#
# Another paragraph of the list. This is where we are now.
# * Underneath we might have a sublist.
#
if inList:
start, lines = self.__linesUntil(lines, (lambda line:
CORE_RE['ul'].match(line)
or CORE_RE['ol'].match(line)
or not line.strip()))
self.parseChunk(parent_elem, start, inList-1,
looseList=looseList)
inList = inList-1
else: # Ok, so it's just a simple block
test = lambda line: not line.strip() or line[0] == '>'
paragraph, lines = self.__linesUntil(lines, test)
if len(paragraph) and paragraph[0].startswith('#'):
self.__processHeader(parent_elem, paragraph)
elif len(paragraph) and CORE_RE["isline3"].match(paragraph[0]):
self.__processHR(parent_elem)
lines = paragraph[1:] + lines
elif paragraph:
self.__processParagraph(parent_elem, paragraph,
inList, looseList)
if lines and not lines[0].strip():
lines = lines[1:] # skip the first (blank) line
def __processHR(self, parentElem):
hr = etree.SubElement(parentElem, "hr")
def __processHeader(self, parentElem, paragraph):
m = CORE_RE['header'].match(paragraph[0])
if m:
level = len(m.group(1))
h = etree.SubElement(parentElem, "h%d" % level)
h.text = m.group(2).strip()
else:
            markdown.message(markdown.CRITICAL, "We've got a problem header!")  # assumes markdown.message/markdown.CRITICAL (Python-Markdown 2.x); the bare names used originally are undefined in this module
def __processParagraph(self, parentElem, paragraph, inList, looseList):
if ( parentElem.tag == 'li'
and not (looseList or parentElem.getchildren())):
# If this is the first paragraph inside "li", don't
# put <p> around it - append the paragraph bits directly
# onto parentElem
el = parentElem
else:
# Otherwise make a "p" element
el = etree.SubElement(parentElem, "p")
dump = []
# Searching for hr or header
for line in paragraph:
# it's hr
if CORE_RE["isline3"].match(line):
el.text = "\n".join(dump)
self.__processHR(el)
dump = []
# it's header
elif line.startswith("#"):
el.text = "\n".join(dump)
self.__processHeader(parentElem, [line])
dump = []
else:
dump.append(line)
if dump:
text = "\n".join(dump)
el.text = text
def __processUList(self, parentElem, lines, inList):
self.__processList(parentElem, lines, inList, listexpr='ul', tag='ul')
def __processOList(self, parentElem, lines, inList):
self.__processList(parentElem, lines, inList, listexpr='ol', tag='ol')
def __processList(self, parentElem, lines, inList, listexpr, tag):
"""
Given a list of document lines starting with a list item,
finds the end of the list, breaks it up, and recursively
processes each list item and the remainder of the text file.
Keyword arguments:
* parentElem: A ElementTree element to which the content will be added
* lines: a list of lines
* inList: a level
Returns: None
"""
ul = etree.SubElement(parentElem, tag) # ul might actually be '<ol>'
looseList = 0
# Make a list of list items
items = []
item = -1
i = 0 # a counter to keep track of where we are
for line in lines:
loose = 0
if not line.strip():
# If we see a blank line, this _might_ be the end of the list
i += 1
loose = 1
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next = lines[j]
break
else:
# There is no more text => end of the list
break
# Check if the next non-blank line is still a part of the list
if ( CORE_RE[listexpr].match(next) or
CORE_RE['tabbed'].match(next) ):
# get rid of any white space in the line
items[item].append(line.strip())
looseList = loose or looseList
continue
else:
break # found end of the list
# Now we need to detect list items (at the current level)
# while also detabing child elements if necessary
for expr in ['ul', 'ol', 'tabbed']:
m = CORE_RE[expr].match(line)
if m:
if expr in ['ul', 'ol']: # We are looking at a new item
#if m.group(1) :
# Removed the check to allow for a blank line
# at the beginning of the list item
items.append([m.group(1)])
item += 1
elif expr == 'tabbed': # This line needs to be detabbed
items[item].append(m.group(4)) #after the 'tab'
i += 1
break
else:
items[item].append(line) # Just regular continuation
i += 1 # added on 2006.02.25
else:
i += 1
# Add the ElementTree elements
for item in items:
li = etree.SubElement(ul, "li")
self.parseChunk(li, item, inList + 1, looseList = looseList)
# Process the remaining part of the section
self.parseChunk(parentElem, lines[i:], inList)
def __linesUntil(self, lines, condition):
"""
A utility function to break a list of lines upon the
        first line that satisfies a condition. The condition
argument should be a predicate function.
"""
i = -1
for line in lines:
i += 1
if condition(line):
break
else:
i += 1
return lines[:i], lines[i:]
def __processQuote(self, parentElem, lines, inList):
"""
        Given a list of document lines starting with a quote, finds
the end of the quote, unindents it and recursively
processes the body of the quote and the remainder of the
text file.
Keyword arguments:
* parentElem: ElementTree element to which the content will be added
* lines: a list of lines
* inList: a level
Returns: None
"""
dequoted = []
i = 0
blank_line = False # allow one blank line between paragraphs
for line in lines:
m = CORE_RE['quoted'].match(line)
if m:
dequoted.append(m.group(1))
i += 1
blank_line = False
elif not blank_line and line.strip() != '':
dequoted.append(line)
i += 1
elif not blank_line and line.strip() == '':
dequoted.append(line)
i += 1
blank_line = True
else:
break
blockquote = etree.SubElement(parentElem, "blockquote")
self.parseChunk(blockquote, dequoted, inList)
self.parseChunk(parentElem, lines[i:], inList)
def __processCodeBlock(self, parentElem, lines, inList):
"""
Given a list of document lines starting with a code block
finds the end of the block, puts it into the ElementTree verbatim
wrapped in ("<pre><code>") and recursively processes the
the remainder of the text file.
Keyword arguments:
* parentElem: ElementTree element to which the content will be added
* lines: a list of lines
* inList: a level
Returns: None
"""
detabbed, theRest = self.detectTabbed(lines)
pre = etree.SubElement(parentElem, "pre")
code = etree.SubElement(pre, "code")
text = "\n".join(detabbed).rstrip()+"\n"
code.text = markdown.AtomicString(text)
self.parseChunk(parentElem, theRest, inList)
def detectTabbed(self, lines):
""" Find indented text and remove indent before further proccesing.
Keyword arguments:
* lines: an array of strings
Returns: a list of post processed items and the unused
remainder of the original list
"""
items = []
item = -1
i = 0 # to keep track of where we are
def detab(line):
match = CORE_RE['tabbed'].match(line)
if match:
return match.group(4)
for line in lines:
if line.strip(): # Non-blank line
line = detab(line)
if line:
items.append(line)
i += 1
continue
else:
return items, lines[i:]
else: # Blank line: _maybe_ we are done.
i += 1 # advance
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next_line = lines[j]; break
else:
break # There is no more text; we are done.
# Check if the next non-blank line is tabbed
if detab(next_line): # Yes, more work to do.
items.append("")
continue
else:
break # No, we are done.
else:
i += 1
return items, lines[i:]
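    # Illustrative sketch of detectTabbed (input hypothetical):
    #
    #     detectTabbed(["\tcode line", "plain text"])
    #
    # would return (["code line"], ["plain text"]), i.e. the detabbed block plus
    # the untouched remainder of the lines.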
class HeaderPreprocessor(markdown.Preprocessor):
"""Replace underlined headers with hashed headers.
(To avoid the need for lookahead later.)
"""
def run (self, lines):
i = -1
while i+1 < len(lines):
i = i+1
if not lines[i].strip():
continue
if lines[i].startswith("#"):
lines.insert(i+1, "\n")
if (i+1 <= len(lines)
and lines[i+1]
and lines[i+1][0] in ['-', '=']):
underline = lines[i+1].strip()
if underline == "="*len(underline):
lines[i] = "# " + lines[i].strip()
lines[i+1] = ""
elif underline == "-"*len(underline):
lines[i] = "## " + lines[i].strip()
lines[i+1] = ""
return lines
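    # Hedged before/after sketch (input hypothetical): running this preprocessor on
    # ["Title", "=====", "text"] rewrites it to ["# Title", "", "text"], so the
    # parser downstream only ever sees hashed headers.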
class LinePreprocessor(markdown.Preprocessor):
"""Convert HR lines to "___" format."""
blockquote_re = re.compile(r'^(> )+')
def run (self, lines):
for i in range(len(lines)):
prefix = ''
m = self.blockquote_re.search(lines[i])
if m:
prefix = m.group(0)
if self._isLine(lines[i][len(prefix):]):
lines[i] = prefix + "___"
return lines
def _isLine(self, block):
"""Determine if a block should be replaced with an <HR>"""
if block.startswith(" "):
return False # a code block
text = "".join([x for x in block if not x.isspace()])
if len(text) <= 2:
return False
for pattern in ['isline1', 'isline2', 'isline3']:
m = CORE_RE[pattern].match(text)
if (m and m.group(1)):
return True
else:
return False
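    # Hedged sketch (inputs hypothetical): run() turns ["---"] into ["___"] and
    # ["> * * *"] into ["> ___"], while an indented "    ---" is left alone
    # because _isLine treats it as a code block.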
class LegacyExtension(markdown.Extension):
""" Replace Markdown's core parser. """
def extendMarkdown(self, md, md_globals):
""" Set the core parser to an instance of MarkdownParser. """
md.parser = MarkdownParser()
md.preprocessors.add ("header", HeaderPreprocessor(self), "<reference")
md.preprocessors.add("line", LinePreprocessor(self), "<reference")
def makeExtension(configs={}):
return LegacyExtension(configs=configs)
|
Nicolas570/chris_db | refs/heads/master | components/faker/faker/providers/company/it_IT/__init__.py | 19 | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
'{{last_name}} {{company_suffix}}',
'{{last_name}}-{{last_name}} {{company_suffix}}',
'{{last_name}}, {{last_name}} e {{last_name}} {{company_suffix}}'
)
catch_phrase_words = (
(
'Abilità', 'Access', 'Adattatore', 'Algoritmo', 'Alleanza', 'Analizzatore', 'Applicazione', 'Approccio',
'Architettura', 'Archivio', 'Intelligenza artificiale', 'Array', 'Attitudine', 'Benchmark', 'Capacità',
'Sfida', 'Circuito', 'Collaborazione', 'Complessità', 'Concetto', 'Conglomerato', 'Contingenza', 'Core',
'Database', 'Data-warehouse', 'Definizione', 'Emulazione', 'Codifica', 'Criptazione', 'Firmware',
'Flessibilità', 'Previsione', 'Frame', 'framework', 'Funzione', 'Funzionalità', 'Interfaccia grafica',
'Hardware', 'Help-desk', 'Gerarchia', 'Hub', 'Implementazione', 'Infrastruttura', 'Iniziativa',
'Installazione', 'Set di istruzioni', 'Interfaccia', 'Soluzione internet', 'Intranet', 'Conoscenza base',
'Matrici', 'Matrice', 'Metodologia', 'Middleware', 'Migrazione', 'Modello', 'Moderazione', 'Monitoraggio',
'Moratoria', 'Rete', 'Architettura aperta', 'Sistema aperto', 'Orchestrazione', 'Paradigma', 'Parallelismo',
'Policy', 'Portale', 'Struttura di prezzo', 'Prodotto', 'Produttività', 'Progetto', 'Proiezione',
'Protocollo', 'Servizio clienti', 'Software', 'Soluzione', 'Standardizzazione', 'Strategia', 'Struttura',
'Successo', 'Sovrastruttura', 'Supporto', 'Sinergia', 'Task-force', 'Finestra temporale', 'Strumenti',
'Utilizzazione', 'Sito web', 'Forza lavoro'
),
(
'adattiva', 'avanzata', 'migliorata', 'assimilata', 'automatizzata', 'bilanciata', 'centralizzata',
'compatibile', 'configurabile', 'cross-platform', 'decentralizzata', 'digitalizzata', 'distribuita',
'piccola', 'ergonomica', 'esclusiva', 'espansa', 'estesa', 'configurabile', 'fondamentale', 'orizzontale',
'implementata', 'innovativa', 'integrata', 'intuitiva', 'inversa', 'gestita', 'obbligatoria', 'monitorata',
'multi-canale', 'multi-laterale', 'open-source', 'operativa', 'ottimizzata', 'organica', 'persistente',
'polarizzata', 'proattiva', 'programmabile', 'progressiva', 'reattiva', 'riallineata', 'ricontestualizzata',
'ridotta', 'robusta', 'sicura', 'condivisibile', 'stand-alone', 'switchabile', 'sincronizzata', 'sinergica',
'totale', 'universale', 'user-friendly', 'versatile', 'virtuale', 'visionaria'
),
(
'24 ore', '24/7', 'terza generazione', 'quarta generazione', 'quinta generazione', 'sesta generazione',
'asimmetrica', 'asincrona', 'background', 'bi-direzionale', 'biforcata', 'bottom-line', 'coerente',
'coesiva', 'composita', 'sensibile al contesto', 'basta sul contesto', 'basata sul contenuto', 'dedicata',
'didattica', 'direzionale', 'discreta', 'dinamica', 'eco-centrica', 'esecutiva', 'esplicita', 'full-range',
'globale', 'euristica', 'alto livello', 'olistica', 'omogenea', 'ibrida', 'impattante', 'incrementale',
'intangibile', 'interattiva', 'intermediaria', 'locale', 'logistica', 'massimizzata', 'metodica',
'mission-critical', 'mobile', 'modulare', 'motivazionale', 'multimedia', 'multi-tasking', 'nazionale',
'neutrale', 'nextgeneration', 'non-volatile', 'object-oriented', 'ottima', 'ottimizzante', 'radicale',
'real-time', 'reciproca', 'regionale', 'responsiva', 'scalabile', 'secondaria', 'stabile', 'statica',
'sistematica', 'sistemica', 'tangibile', 'terziaria', 'uniforme', 'valore aggiunto'
)
)
bsWords = (
(
'partnerships', 'comunità', 'ROI', 'soluzioni', 'e-services', 'nicchie', 'tecnologie', 'contenuti',
'supply-chains', 'convergenze', 'relazioni', 'architetture', 'interfacce', 'mercati', 'e-commerce',
'sistemi', 'modelli', 'schemi', 'reti', 'applicazioni', 'metriche', 'e-business', 'funzionalità',
'esperienze', 'webservices', 'metodologie'
),
(
'implementate', 'utilizzo', 'integrate', 'ottimali', 'evolutive', 'abilitate', 'reinventate', 'aggregate',
'migliorate', 'incentivate', 'monetizzate', 'sinergizzate', 'strategiche', 'deploy', 'marchi',
'accrescitive', 'target', 'sintetizzate', 'spedizioni', 'massimizzate', 'innovazione', 'guida',
'estensioni', 'generate', 'exploit', 'transizionali', 'matrici', 'ricontestualizzate'
),
(
'valore aggiunto', 'verticalizzate', 'proattive', 'forti', 'rivoluzionari', 'scalabili', 'innovativi',
'intuitivi', 'strategici', 'e-business', 'mission-critical', '24/7', 'globali', 'B2B', 'B2C', 'granulari',
'virtuali', 'virali', 'dinamiche', 'magnetiche', 'web', 'interattive', 'sexy', 'back-end', 'real-time',
'efficienti', 'front-end', 'distributivi', 'estensibili', 'mondiali', 'open-source', 'cross-platform',
'sinergiche', 'out-of-the-box', 'enterprise', 'integrate', 'di impatto', 'wireless', 'trasparenti',
'next-generation', 'cutting-edge', 'visionari', 'plug-and-play', 'collaborative', 'olistiche', 'ricche'
)
)
company_suffixes = ('SPA', 'e figli', 'Group', 's.r.l.')
def catch_phrase(self):
"""
:example 'Robust full-range hub'
"""
result = []
for word_list in self.catch_phrase_words:
result.append(self.random_element(word_list))
return " ".join(result)
def bs(self):
"""
:example 'integrate extensible convergence'
"""
result = []
for word_list in self.bsWords:
result.append(self.random_element(word_list))
return " ".join(result) |
stenskjaer/scrapy | refs/heads/master | scrapy/core/__init__.py | 216 | """
Scrapy core library classes and functions.
"""
|
shaurz/ome | refs/heads/master | ome/emit.py | 1 | # ome - Object Message Expressions
# Copyright (c) 2015-2016 Luke McCarthy <luke@iogopro.co.uk>
import io
from contextlib import contextmanager
class CodeEmitter(object):
def __init__(self, indent=' ' * 4, indent_level=0):
self._output = []
self._indent = indent
self._indent_level = indent_level
self._indent_str = self._indent * indent_level
def indent(self):
self._indent_level += 1
self._indent_str = self._indent * self._indent_level
def dedent(self):
self._indent_level -= 1
self._indent_str = self._indent * self._indent_level
@contextmanager
def indented(self):
self.indent()
try:
yield
finally:
self.dedent()
def __call__(self, line):
self._output.append(self._indent_str + line)
def unindented(self, line):
self._output.append(line)
def write_to(self, buf):
for line in self._output:
buf.write(line)
buf.write('\n')
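# Hedged usage sketch for CodeEmitter (snippet hypothetical, not part of the module):
#
#     emit = CodeEmitter()
#     emit('if (x) {')
#     with emit.indented():
#         emit('return 1;')
#     emit('}')
#
# write_to() then writes the three lines, with the middle one indented once.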
class ProcedureCodeEmitter(CodeEmitter):
def __init__(self, indent=' ' * 4):
super(ProcedureCodeEmitter, self).__init__(indent)
self._end_output = []
self._tail_emitters = []
def tail_emitter(self, label):
emitter = CodeEmitter(self._indent, self._indent_level)
emitter.label(label)
self._tail_emitters.append(emitter)
return emitter
def end(self, line):
self._end_output.append(self._indent_str + line)
def get_output(self):
buf = io.StringIO()
self.write_to(buf)
for emitter in self._tail_emitters:
emitter.write_to(buf)
for line in self._end_output:
buf.write(line)
buf.write('\n')
return buf.getvalue()
class MethodCode(object):
def __init__(self, instructions, num_args):
self.instructions = instructions
self.num_args = num_args
def generate_target_code(self, label, target):
emit = ProcedureCodeEmitter(indent=target.indent)
codegen = target.ProcedureCodegen(emit)
codegen.optimise(self)
codegen.begin(label, self.num_args)
for ins in self.instructions:
codegen.pre_instruction(ins)
ins.emit(codegen)
codegen.end()
return emit.get_output()
class MethodCodeBuilder(object):
def __init__(self, num_args, num_locals, program):
self.num_args = num_args + 1 # self is arg 0
self.num_locals = num_args + num_locals + 1
self.program = program
self.instructions = []
def add_temp(self):
local = self.num_locals
self.num_locals += 1
return local
def add_instruction(self, instruction):
self.instructions.append(instruction)
def allocate_string(self, string):
return self.program.data_table.allocate_string(string)
def allocate_large_integer(self, string):
return self.program.data_table.allocate_large_integer(string)
def get_tag(self, tag_name):
return self.program.ids.tags[tag_name]
def get_constant(self, constant_name):
return self.program.ids.constants[constant_name]
def make_message_label(self, symbol):
return self.program.target.make_message_label(symbol)
def make_lookup_label(self, symbol):
return self.program.target.make_lookup_label(symbol)
def make_method_label(self, tag, symbol):
return self.program.target.make_method_label(tag, symbol)
def get_code(self):
return MethodCode(self.instructions, self.num_args)
|
goliveirab/odoo | refs/heads/8.0 | addons/project_issue/res_config.py | 441 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class project_issue_settings(osv.osv_memory):
_name = 'project.config.settings'
_inherit = ['project.config.settings', 'fetchmail.config.settings']
_columns = {
'fetchmail_issue': fields.boolean("Create issues from an incoming email account ",
fetchmail_model='project.issue', fetchmail_name='Incoming Issues',
help="""Allows you to configure your incoming mail server, and create issues from incoming emails."""),
}
|
FlavienCollomb/calligraphr-php | refs/heads/master | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
|
Brocade-OpenSource/OpenStack-DNRM-Nova | refs/heads/master | nova/tests/cert/test_rpcapi.py | 6 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
from oslo.config import cfg
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
class CertRpcAPITestCase(test.TestCase):
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = expected_version
self.call_ctxt = None
self.call_topic = None
self.call_msg = None
self.call_timeout = None
def _fake_call(_ctxt, _topic, _msg, _timeout):
self.call_ctxt = _ctxt
self.call_topic = _topic
self.call_msg = _msg
self.call_timeout = _timeout
return expected_retval
self.stubs.Set(rpc, 'call', _fake_call)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
self.assertEqual(self.call_topic, CONF.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
|
shubhamgupta123/erpnext | refs/heads/master | erpnext/support/doctype/issue/issue.py | 2 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.utils import now
from frappe.utils.user import is_website_user
from frappe.email.inbox import link_communication_to_document
sender_field = "raised_by"
class Issue(Document):
def get_feed(self):
return "{0}: {1}".format(_(self.status), self.subject)
def validate(self):
if (self.get("__islocal") and self.via_customer_portal):
self.flags.create_communication = True
if not self.raised_by:
self.raised_by = frappe.session.user
self.update_status()
self.set_lead_contact(self.raised_by)
if self.status == "Closed":
from frappe.desk.form.assign_to import clear
clear(self.doctype, self.name)
def on_update(self):
# create the communication email and remove the description
if (self.flags.create_communication and self.via_customer_portal):
self.create_communication()
self.flags.communication_created = None
def set_lead_contact(self, email_id):
import email.utils
email_id = email.utils.parseaddr(email_id)[1]
if email_id:
if not self.lead:
self.lead = frappe.db.get_value("Lead", {"email_id": email_id})
if not self.contact and not self.customer:
self.contact = frappe.db.get_value("Contact", {"email_id": email_id})
if self.contact:
contact = frappe.get_doc('Contact', self.contact)
self.customer = contact.get_link_for('Customer')
if not self.company:
self.company = frappe.db.get_value("Lead", self.lead, "company") or \
frappe.db.get_default("Company")
def update_status(self):
status = frappe.db.get_value("Issue", self.name, "status")
if self.status!="Open" and status =="Open" and not self.first_responded_on:
self.first_responded_on = now()
if self.status=="Closed" and status !="Closed":
self.resolution_date = now()
if self.status=="Open" and status !="Open":
# if no date, it should be set as None and not a blank string "", as per mysql strict config
self.resolution_date = None
def create_communication(self):
communication = frappe.new_doc("Communication")
communication.update({
"communication_type": "Communication",
"communication_medium": "Email",
"sent_or_received": "Received",
"email_status": "Open",
"subject": self.subject,
"sender": self.raised_by,
"content": self.description,
"status": "Linked",
"reference_doctype": "Issue",
"reference_name": self.name
})
communication.ignore_permissions = True
communication.ignore_mandatory = True
communication.save()
self.db_set("description", "")
def split_issue(self, subject, communication_id):
# Bug: Pressing enter doesn't send subject
from copy import deepcopy
replicated_issue = deepcopy(self)
replicated_issue.subject = subject
frappe.get_doc(replicated_issue).insert()
# Replicate linked Communications
# todo get all communications in timeline before this, and modify them to append them to new doc
comm_to_split_from = frappe.get_doc("Communication", communication_id)
communications = frappe.get_all("Communication", filters={"reference_name": comm_to_split_from.reference_name, "reference_doctype": "Issue", "creation": ('>=', comm_to_split_from.creation)})
for communication in communications:
doc = frappe.get_doc("Communication", communication.name)
doc.reference_name = replicated_issue.name
doc.save(ignore_permissions=True)
return replicated_issue.name
def get_list_context(context=None):
return {
"title": _("Issues"),
"get_list": get_issue_list,
"row_template": "templates/includes/issue_row.html",
"show_sidebar": True,
"show_search": True,
'no_breadcrumbs': True
}
def get_issue_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by=None):
from frappe.www.list import get_list
user = frappe.session.user
contact = frappe.db.get_value('Contact', {'user': user}, 'name')
customer = None
if contact:
contact_doc = frappe.get_doc('Contact', contact)
customer = contact_doc.get_link_for('Customer')
ignore_permissions = False
if is_website_user():
if not filters: filters = []
filters.append(("Issue", "customer", "=", customer)) if customer else filters.append(("Issue", "raised_by", "=", user))
ignore_permissions = True
return get_list(doctype, txt, filters, limit_start, limit_page_length, ignore_permissions=ignore_permissions)
@frappe.whitelist()
def set_status(name, status):
st = frappe.get_doc("Issue", name)
st.status = status
st.save()
def auto_close_tickets():
""" auto close the replied support tickets after 7 days """
auto_close_after_days = frappe.db.get_value("Support Settings", "Support Settings", "close_issue_after_days") or 7
issues = frappe.db.sql(""" select name from tabIssue where status='Replied' and
modified<DATE_SUB(CURDATE(), INTERVAL %s DAY) """, (auto_close_after_days), as_dict=True)
for issue in issues:
doc = frappe.get_doc("Issue", issue.get("name"))
doc.status = "Closed"
doc.flags.ignore_permissions = True
doc.flags.ignore_mandatory = True
doc.save()
@frappe.whitelist()
def set_multiple_status(names, status):
names = json.loads(names)
for name in names:
set_status(name, status)
def has_website_permission(doc, ptype, user, verbose=False):
from erpnext.controllers.website_list_for_contact import has_website_permission
permission_based_on_customer = has_website_permission(doc, ptype, user, verbose)
return permission_based_on_customer or doc.raised_by==user
def update_issue(contact, method):
"""Called when Contact is deleted"""
frappe.db.sql("""UPDATE `tabIssue` set contact='' where contact=%s""", contact.name)
@frappe.whitelist()
def make_issue_from_communication(communication, ignore_communication_links=False):
""" raise a issue from email """
doc = frappe.get_doc("Communication", communication)
issue = frappe.get_doc({
"doctype": "Issue",
"subject": doc.subject,
"communication_medium": doc.communication_medium,
"raised_by": doc.sender or "",
"raised_by_phone": doc.phone_no or ""
}).insert(ignore_permissions=True)
link_communication_to_document(doc, "Issue", issue.name, ignore_communication_links)
return issue.name |
majintao0131/yaml-cpp.core | refs/heads/master | test/create-emitter-tests.py | 87 | import sys
import yaml
import hashlib
DEFINE = 'YAML_GEN_TESTS'
EVENT_COUNT = 5
def encode_stream(line):
for c in line:
if c == '\n':
yield '\\n'
elif c == '"':
yield '\\"'
elif c == '\t':
yield '\\t'
elif ord(c) < 0x20:
yield '\\x' + hex(ord(c))
else:
yield c
def encode(line):
return ''.join(encode_stream(line))
def doc_start(implicit=False):
if implicit:
return {'emit': '', 'handle': 'OnDocumentStart(_)'}
else:
return {'emit': 'BeginDoc', 'handle': 'OnDocumentStart(_)'}
def doc_end(implicit=False):
if implicit:
return {'emit': '', 'handle': 'OnDocumentEnd()'}
else:
return {'emit': 'EndDoc', 'handle': 'OnDocumentEnd()'}
def scalar(value, tag='', anchor='', anchor_id=0):
emit = []
if tag:
emit += ['VerbatimTag("%s")' % encode(tag)]
if anchor:
emit += ['Anchor("%s")' % encode(anchor)]
if tag:
out_tag = encode(tag)
else:
if value == encode(value):
out_tag = '?'
else:
out_tag = '!'
emit += ['"%s"' % encode(value)]
return {'emit': emit, 'handle': 'OnScalar(_, "%s", %s, "%s")' % (out_tag, anchor_id, encode(value))}
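# Illustrative sketch (value chosen for illustration): scalar('foo') returns
#
#     {'emit': ['"foo"'], 'handle': 'OnScalar(_, "?", 0, "foo")'}
#
# i.e. the emitter-side statement paired with the handler call the generated test expects.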
def comment(value):
return {'emit': 'Comment("%s")' % value, 'handle': ''}
def seq_start(tag='', anchor='', anchor_id=0, style='_'):
emit = []
if tag:
emit += ['VerbatimTag("%s")' % encode(tag)]
if anchor:
emit += ['Anchor("%s")' % encode(anchor)]
if tag:
out_tag = encode(tag)
else:
out_tag = '?'
emit += ['BeginSeq']
return {'emit': emit, 'handle': 'OnSequenceStart(_, "%s", %s, %s)' % (out_tag, anchor_id, style)}
def seq_end():
return {'emit': 'EndSeq', 'handle': 'OnSequenceEnd()'}
def map_start(tag='', anchor='', anchor_id=0, style='_'):
emit = []
if tag:
emit += ['VerbatimTag("%s")' % encode(tag)]
if anchor:
emit += ['Anchor("%s")' % encode(anchor)]
if tag:
out_tag = encode(tag)
else:
out_tag = '?'
emit += ['BeginMap']
return {'emit': emit, 'handle': 'OnMapStart(_, "%s", %s, %s)' % (out_tag, anchor_id, style)}
def map_end():
return {'emit': 'EndMap', 'handle': 'OnMapEnd()'}
def gen_templates():
yield [[doc_start(), doc_start(True)],
[scalar('foo'), scalar('foo\n'), scalar('foo', 'tag'), scalar('foo', '', 'anchor', 1)],
[doc_end(), doc_end(True)]]
yield [[doc_start(), doc_start(True)],
[seq_start()],
[[], [scalar('foo')], [scalar('foo', 'tag')], [scalar('foo', '', 'anchor', 1)], [scalar('foo', 'tag', 'anchor', 1)], [scalar('foo'), scalar('bar')], [scalar('foo', 'tag', 'anchor', 1), scalar('bar', 'tag', 'other', 2)]],
[seq_end()],
[doc_end(), doc_end(True)]]
yield [[doc_start(), doc_start(True)],
[map_start()],
[[], [scalar('foo'), scalar('bar')], [scalar('foo', 'tag', 'anchor', 1), scalar('bar', 'tag', 'other', 2)]],
[map_end()],
[doc_end(), doc_end(True)]]
yield [[doc_start(True)],
[map_start()],
[[scalar('foo')], [seq_start(), scalar('foo'), seq_end()], [map_start(), scalar('foo'), scalar('bar'), map_end()]],
[[scalar('foo')], [seq_start(), scalar('foo'), seq_end()], [map_start(), scalar('foo'), scalar('bar'), map_end()]],
[map_end()],
[doc_end(True)]]
yield [[doc_start(True)],
[seq_start()],
[[scalar('foo')], [seq_start(), scalar('foo'), seq_end()], [map_start(), scalar('foo'), scalar('bar'), map_end()]],
[[scalar('foo')], [seq_start(), scalar('foo'), seq_end()], [map_start(), scalar('foo'), scalar('bar'), map_end()]],
[seq_end()],
[doc_end(True)]]
def expand(template):
if len(template) == 0:
pass
elif len(template) == 1:
for item in template[0]:
if isinstance(item, list):
yield item
else:
yield [item]
else:
for car in expand(template[:1]):
for cdr in expand(template[1:]):
yield car + cdr
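# Illustrative sketch of expand() (template hypothetical): it yields the cartesian
# product of the per-slot alternatives, so
#
#     list(expand([[1, 2], [3]]))  ->  [[1, 3], [2, 3]]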
def gen_events():
for template in gen_templates():
for events in expand(template):
base = list(events)
for i in range(0, len(base)+1):
cpy = list(base)
cpy.insert(i, comment('comment'))
yield cpy
def gen_tests():
for events in gen_events():
name = 'test' + hashlib.sha1(''.join(yaml.dump(event) for event in events)).hexdigest()[:20]
yield {'name': name, 'events': events}
class Writer(object):
def __init__(self, out):
self.out = out
self.indent = 0
def writeln(self, s):
self.out.write('%s%s\n' % (' ' * self.indent, s))
class Scope(object):
def __init__(self, writer, name, indent):
self.writer = writer
self.name = name
self.indent = indent
def __enter__(self):
self.writer.writeln('%s {' % self.name)
self.writer.indent += self.indent
def __exit__(self, type, value, traceback):
self.writer.indent -= self.indent
self.writer.writeln('}')
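# Hedged usage sketch for Writer/Scope (snippet hypothetical):
#
#     out = Writer(sys.stdout)
#     with Scope(out, 'namespace Foo', 2):
#         out.writeln('int x;')
#
# prints "namespace Foo {", then "  int x;" indented by two spaces, then "}".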
def create_emitter_tests(out):
out = Writer(out)
includes = [
'handler_test.h',
'yaml-cpp/yaml.h',
'gmock/gmock.h',
'gtest/gtest.h',
]
for include in includes:
out.writeln('#include "%s"' % include)
out.writeln('')
usings = [
'::testing::_',
]
for using in usings:
out.writeln('using %s;' % using)
out.writeln('')
with Scope(out, 'namespace YAML', 0) as _:
with Scope(out, 'namespace', 0) as _:
out.writeln('')
out.writeln('typedef HandlerTest GenEmitterTest;')
out.writeln('')
tests = list(gen_tests())
for test in tests:
with Scope(out, 'TEST_F(%s, %s)' % ('GenEmitterTest', test['name']), 2) as _:
out.writeln('Emitter out;')
for event in test['events']:
emit = event['emit']
if isinstance(emit, list):
for e in emit:
out.writeln('out << %s;' % e)
elif emit:
out.writeln('out << %s;' % emit)
out.writeln('')
for event in test['events']:
handle = event['handle']
if handle:
out.writeln('EXPECT_CALL(handler, %s);' % handle)
out.writeln('Parse(out.c_str());')
out.writeln('')
if __name__ == '__main__':
create_emitter_tests(sys.stdout)
|
xiechaopeng/tifi | refs/heads/master | dejavu/wavio.py | 20 | # wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
# Synopsis: A Python module for reading and writing 24 bit WAV files.
# Github: github.com/WarrenWeckesser/wavio
import wave as _wave
import numpy as _np
def _wav2array(nchannels, sampwidth, data):
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = _np.empty((num_samples, nchannels, 4), dtype=_np.uint8)
raw_bytes = _np.fromstring(data, dtype=_np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = _np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
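# Hedged sketch of the 24-bit branch above (bytes hypothetical, mono input): the three
# little-endian bytes b'\x01\x00\x00' decode to the int32 value 1, while b'\xff\xff\xff'
# sign-extends to -1.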
def readwav(file):
"""
Read a WAV file.
Parameters
----------
file : string or file object
Either the name of a file or an open file pointer.
Return Values
-------------
rate : float
The sampling frequency (i.e. frame rate)
sampwidth : float
The sample width, in bytes. E.g. for a 24 bit WAV file,
sampwidth is 3.
data : numpy array
The array containing the data. The shape of the array is
(num_samples, num_channels). num_channels is the number of
audio channels (1 for mono, 2 for stereo).
Notes
-----
    This function uses the `wave` module of the Python standard library
to read the WAV file, so it has the same limitations as that library.
In particular, the function does not read compressed WAV files.
"""
wav = _wave.open(file)
rate = wav.getframerate()
nchannels = wav.getnchannels()
sampwidth = wav.getsampwidth()
nframes = wav.getnframes()
data = wav.readframes(nframes)
wav.close()
array = _wav2array(nchannels, sampwidth, data)
return rate, sampwidth, array
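# Minimal usage sketch for readwav (the filename is hypothetical):
#
#     rate, sampwidth, data = readwav("example.wav")
#     print(rate, sampwidth, data.shape)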
def writewav24(filename, rate, data):
"""
Create a 24 bit wav file.
Parameters
----------
filename : string
Name of the file to create.
rate : float
The sampling frequency (i.e. frame rate) of the data.
data : array-like collection of integer or floating point values
data must be "array-like", either 1- or 2-dimensional. If it
is 2-d, the rows are the frames (i.e. samples) and the columns
are the channels.
Notes
-----
The data is assumed to be signed, and the values are assumed to be
within the range of a 24 bit integer. Floating point values are
converted to integers. The data is not rescaled or normalized before
writing it to the file.
Example
-------
Create a 3 second 440 Hz sine wave.
>>> rate = 22050 # samples per second
>>> T = 3 # sample duration (seconds)
>>> f = 440.0 # sound frequency (Hz)
>>> t = np.linspace(0, T, T*rate, endpoint=False)
>>> x = (2**23 - 1) * np.sin(2 * np.pi * f * t)
>>> writewav24("sine24.wav", rate, x)
"""
a32 = _np.asarray(data, dtype=_np.int32)
if a32.ndim == 1:
# Convert to a 2D array with a single column.
a32.shape = a32.shape + (1,)
# By shifting first 0 bits, then 8, then 16, the resulting output
# is 24 bit little-endian.
a8 = (a32.reshape(a32.shape + (1,)) >> _np.array([0, 8, 16])) & 255
wavdata = a8.astype(_np.uint8).tostring()
w = _wave.open(filename, 'wb')
w.setnchannels(a32.shape[1])
w.setsampwidth(3)
w.setframerate(rate)
w.writeframes(wavdata)
w.close()
|
RyanSkraba/beam | refs/heads/master | sdks/python/apache_beam/runners/runner_test.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the PipelineRunner and DirectRunner classes.
Note that PipelineRunner and DirectRunner functionality is tested in all
the other unit tests. In this file we choose to test only aspects related to
caching and clearing values that are not tested elsewhere.
"""
from __future__ import absolute_import
import unittest
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.runners import DirectRunner
from apache_beam.runners import create_runner
class RunnerTest(unittest.TestCase):
default_properties = [
'--dataflow_endpoint=ignored',
'--job_name=test-job',
'--project=test-project',
'--staging_location=ignored',
'--temp_location=/dev/null',
'--no_auth']
def test_create_runner(self):
self.assertTrue(
isinstance(create_runner('DirectRunner'), DirectRunner))
self.assertRaises(ValueError, create_runner, 'xyz')
def test_create_runner_shorthand(self):
self.assertTrue(
isinstance(create_runner('DiReCtRuNnEr'), DirectRunner))
self.assertTrue(
isinstance(create_runner('directrunner'), DirectRunner))
self.assertTrue(
isinstance(create_runner('direct'), DirectRunner))
self.assertTrue(
isinstance(create_runner('DiReCt'), DirectRunner))
self.assertTrue(
isinstance(create_runner('Direct'), DirectRunner))
def test_run_api(self):
my_metric = Metrics.counter('namespace', 'my_metric')
runner = DirectRunner()
result = runner.run(
beam.Create([1, 10, 100]) | beam.Map(lambda x: my_metric.inc(x)))
result.wait_until_finish()
# Use counters to assert the pipeline actually ran.
my_metric_value = result.metrics().query()['counters'][0].committed
self.assertEqual(my_metric_value, 111)
def test_run_api_with_callable(self):
my_metric = Metrics.counter('namespace', 'my_metric')
def fn(start):
return (start
| beam.Create([1, 10, 100])
| beam.Map(lambda x: my_metric.inc(x)))
runner = DirectRunner()
result = runner.run(fn)
result.wait_until_finish()
# Use counters to assert the pipeline actually ran.
my_metric_value = result.metrics().query()['counters'][0].committed
self.assertEqual(my_metric_value, 111)
if __name__ == '__main__':
unittest.main()
|
vgonisanz/wpm | refs/heads/master | examples/buymenu_sample.py | 1 | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src/widgets'))
import curses # Todo remove chaning own variables
from curses import wrapper # Use my own wrapper
from wpm import Wpm
from menu import Menu
from optionstruct import OptionStruct
# Configuration
buymenu_width = 30
buymenu_height = 15
buymenu_x0 = 5
buymenu_y0 = 5
buymenu_title = "Buy menu"
buymenu_instructions = "Use arrows to move, ENTER to select, q to quit"
buymenu_centered = True
# Variables
wpm = None
screen = None
buymenu = None
def initialize():
global wpm
global screen
wpm = Wpm(True)
wpm.logger.info("Starting %s" % os.path.basename(__file__))
screen = wpm.get_screen()
return None
def create_buymenu():
global buymenu
# Create submenu 1
submenu_1_list = {}
submenu_1_list['title'] = "Choose a pistol"
submenu_1_list['names'] = ["USP .45ACP Tactical", "Glock 18C Select Fire", "Desert Eagle .50 AE", "SIG P228", "FN Five-Seven"]
submenu_1_list['callbacks'] = [print_result, print_result, print_result, print_result, print_result]
submenu_1_list['args'] = ["400$", "400$", "650$", "600$", "750$"]
submenu_1 = create_menu(submenu_1_list['title'], submenu_1_list['names'], submenu_1_list['callbacks'], submenu_1_list['args'])
submenu_2_list = {}
submenu_2_list['title'] = "Choose a shotgun"
submenu_2_list['names'] = ["M3 Super 90 Combat", "XM1014"]
submenu_2_list['callbacks'] = [print_result, print_result]
submenu_2_list['args'] = ["1700$", "3000$"]
submenu_2 = create_menu(submenu_2_list['title'], submenu_2_list['names'], submenu_2_list['callbacks'], submenu_2_list['args'])
submenu_3_list = {}
submenu_3_list['title'] = "Choose a sub machine gun"
submenu_3_list['names'] = ["MP5 Navy", "Streyr TMP", "FN P90", "MAC-10", "H&K UMP"]
submenu_3_list['callbacks'] = [print_result, print_result, print_result, print_result, print_result]
submenu_3_list['args'] = ["1500$", "1250$", "2350$", "1400$", "1700$"]
submenu_3 = create_menu(submenu_3_list['title'], submenu_3_list['names'], submenu_3_list['callbacks'], submenu_3_list['args'])
submenu_4_list = {}
submenu_4_list['title'] = "Choose a rifle"
submenu_4_list['names'] = ["AK47", "M4A1 Carbine/COLT", "SG552 Commando", "Steyr AUG", "Steyr Scout Sniper Rifle", "Artic Warfare Magnum", "SIG SG-550", "G3/SG1 Sniper Rifle"]
submenu_4_list['callbacks'] = [print_result, print_result, print_result, print_result, print_result, print_result, print_result, print_result]
submenu_4_list['args'] = ["2500$", "3100$", "3500$", "3500$", "2750$", "4750$", "4200$", "5000$"]
submenu_4 = create_menu(submenu_4_list['title'], submenu_4_list['names'], submenu_4_list['callbacks'], submenu_4_list['args'])
submenu_5_list = {}
submenu_5_list['title'] = "Choose a machineGun"
submenu_5_list['names'] = ["FN M249 PARA"]
submenu_5_list['callbacks'] = [print_result]
submenu_5_list['args'] = ["5750$"]
submenu_5 = create_menu(submenu_5_list['title'], submenu_5_list['names'], submenu_5_list['callbacks'], submenu_5_list['args'])
submenu_8_list = {}
submenu_8_list['title'] = "Choose a equipment"
submenu_8_list['names'] = ["Armor", "Armor|Helmet", "Flash", "Grenade", "Smoke", "Defuser", "NightVision", "Shield"]
submenu_8_list['callbacks'] = [print_result, print_result, print_result, print_result, print_result, print_result, print_result, print_result]
submenu_8_list['args'] = ["650$", "1000$", "200$", "300$", "300$", "200$", "1250$", "1000$"]
submenu_8 = create_menu(submenu_8_list['title'], submenu_8_list['names'], submenu_8_list['callbacks'], submenu_8_list['args'])
# Create test buymenu and run
main_menu_list = {}
main_menu_list['title'] = "Choose a weapon to buy"
main_menu_list['names'] = ["Pistols", "Shotguns", "SMG", "Rifles", "MachineGuns", "Primary ammo", "Secondary ammo", "Equipment"]
main_menu_list['callbacks'] = [launch_menu, launch_menu, launch_menu, launch_menu, launch_menu, print_result, print_result, launch_menu]
main_menu_list['args'] = [submenu_1, submenu_2, submenu_3, submenu_4, submenu_5, ["Primary ammo", "100$"], ["Secondary ammo", "60$"], submenu_8]
main_menu = create_menu(main_menu_list['title'], main_menu_list['names'], main_menu_list['callbacks'], main_menu_list['args'])
main_menu.run()
screen.waitforkey()
return None
def create_menu(title, name_list, callback_list = None, args_list = None):
menu = Menu(buymenu_width, buymenu_height, buymenu_x0, buymenu_y0, title)
# Create options
for i in range(0, len(name_list)):
name = name_list[i]
callback = None
args = None
if callback_list:
callback = callback_list[i]
if args_list:
args = args_list[i]
option = OptionStruct(name, callback, args)
menu.add_option(option)
return menu
def launch_menu(menu):
menu.run()
return None
def print_result(money):
"""Maybe menu implement a callback with a pointer to itself to read info.
"""
screen.print_message("You buy for: %s" % money, 0, 0)
screen.waitforkey()
screen.clear()
return None
def main(stdscr):
initialize()
create_buymenu()
return None
if __name__ == "__main__":
wrapper(main)
print("Thanks for use %s" % os.path.basename(__file__))
|
tiborsimon/projects | refs/heads/master | projects/gui/gui.py | 1 | import pydoc
import urwid
from projects.gui import doc_generator
from projects.gui.project_selector import ProjectSelector
def select_project(project_list, path_callback):
max_width = len(max(project_list, key=len))
f = ProjectSelector(project_list, 'normal', 'highlighted', 'selected')
def refresh_list(key=''):
if key:
if key in ('delete', 'backspace'):
f.remove_key()
else:
if key in 'abcdefghijklmnopqrstuvwxyz- .0123456789':
f.add_key(key)
s = f.render()
txt.set_text(s)
def exit_on_q(key):
if key.__class__ is not str:
return
if key in ('Q',):
raise urwid.ExitMainLoop()
if key == 'up':
f.up()
if key == 'down':
f.down()
if key == 'enter':
path_callback(f.select())
raise urwid.ExitMainLoop()
key = key.lower()
refresh_list(key)
palette = [
('normal', 'light gray', ''),
('selected', 'yellow, bold', ''),
('highlighted', 'black, bold', 'yellow'),
('quit button', 'light red, bold', ''),
('enter button', 'light green, bold', '')
]
txt = urwid.Text('', align='left')
fill = urwid.Filler(txt)
pad = urwid.Padding(fill, align='center', width=max_width+4)
box = urwid.LineBox(pad, title="Projects")
footer = urwid.Text(['Start typing to search. Use arrow keys to navigate. Press (', ('enter button', 'Enter'), ') to select project. ', 'Press (', ('quit button', 'Q'), ') to exit.'])
frame = urwid.Frame(body=box, footer=footer)
loop = urwid.MainLoop(frame, palette, unhandled_input=exit_on_q)
refresh_list()
loop.run()
def show_project_details(data, width):
doc = doc_generator.generate_doc(data, width)
doc = doc.format(
head='\033[0;33m',
project='\033[1;36m',
command='\033[1;33m',
reset='\033[0m'
)
pydoc.pipepager(doc, cmd='less -R')
# pydoc.pager(doc)
|
wimac/home | refs/heads/master | Dropbox/skel/bin/sick-beard/lib/hachoir_parser/misc/pdf.py | 90 | """
Adobe Portable Document Format (PDF) parser.
Author: Christophe Gisquet <christophe.gisquet@free.fr>
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (
Field, FieldSet,
ParserError,
GenericVector,
UInt8, UInt16, UInt32,
String,
RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
MAGIC = "%PDF-"
ENDMAGIC = "%%EOF"
def getLineEnd(s, pos=None):
if pos == None:
pos = (s.absolute_address+s.current_size)//8
end = s.stream.searchBytesLength("\x0D", False, 8*pos)
other_end = s.stream.searchBytesLength("\x0A", False, 8*pos)
if end == None or (other_end != None and other_end < end):
return other_end
return end
# TODO: rewrite to account for all possible terminations: ' ', '/', '\0XD'
# But this probably requires changing *ALL* of the places they are used,
# as ' ' is swallowed but not the others
def getElementEnd(s, limit=' ', offset=0):
addr = s.absolute_address+s.current_size
addr += 8*offset
pos = s.stream.searchBytesLength(limit, True, addr)
if pos == None:
#s.info("Can't find '%s' starting at %u" % (limit, addr))
return None
return pos
class PDFNumber(Field):
LIMITS = ['[', '/', '\x0D', ']']
"""
sprintf("%i") or sprinf("%.?f")
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
# Get size
size = getElementEnd(parent)
for limit in self.LIMITS:
other_size = getElementEnd(parent, limit)
if other_size != None:
other_size -= 1
if size == None or other_size < size:
size = other_size
self._size = 8*size
# Get value
val = parent.stream.readBytes(self.absolute_address, size)
self.info("Number: size=%u value='%s'" % (size, val))
if val.find('.') != -1:
self.createValue = lambda: float(val)
else:
self.createValue = lambda: int(val)
class PDFString(Field):
"""
A string of the shape:
( This string \
uses 3 lines \
with the CR(LF) inhibited )
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
val = ""
count = 1
off = 1
while not parent.eof:
char = parent.stream.readBytes(self.absolute_address+8*off, 1)
# Non-ASCII
if not char.isalpha() or char == '\\':
off += 1
continue
if char == '(':
count += 1
if char == ')':
count -= 1
# Parenthesis block = 0 => end of string
if count == 0:
off += 1
break
# Add it to the string
val += char
self._size = 8*off
self.createValue = lambda: val
class PDFName(Field):
LIMITS = ['[', '/', '<', ']']
"""
String starting with '/', where characters may be written using their
    ASCII code (example: '#20' would be ' ').
' ', ']' and '\0' are supposed not to be part of the name
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
if parent.stream.readBytes(self.absolute_address, 1) != '/':
raise ParserError("Unknown PDFName '%s'" %
parent.stream.readBytes(self.absolute_address, 10))
size = getElementEnd(parent, offset=1)
#other_size = getElementEnd(parent, '[')-1
#if size == None or (other_size != None and other_size < size):
# size = other_size
for limit in self.LIMITS:
other_size = getElementEnd(parent, limit, 1)
if other_size != None:
other_size -= 1
if size == None or other_size < size:
#self.info("New size: %u" % other_size)
size = other_size
self._size = 8*(size+1)
# Value should be without the initial '/' and final ' '
self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, size).strip(' ')
class PDFID(Field):
"""
    Not described as an object, but let's parse it as if it were one.
This ID has the shape <hexadecimal ASCII string>
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
self._size = 8*getElementEnd(parent, '>')
self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, (self._size//8)-1)
class NotABool(Exception): pass
class PDFBool(Field):
"""
"true" or "false" string standing for the boolean value
"""
def __init__(self, parent, name, desc=None):
Field.__init__(self, parent, name, description=desc)
if parent.stream.readBytes(self.absolute_address, 4) == "true":
self._size = 4
self.createValue = lambda: True
elif parent.stream.readBytes(self.absolute_address, 5) == "false":
self._size = 5
self.createValue = lambda: False
        else:
            raise NotABool
class LineEnd(FieldSet):
"""
Made of 0x0A, 0x0D (we may include several line ends)
"""
def createFields(self):
while not self.eof:
addr = self.absolute_address+self.current_size
char = self.stream.readBytes(addr, 1)
if char == '\x0A':
yield UInt8(self, "lf", "Line feed")
elif char == '\x0D':
yield UInt8(self, "cr", "Line feed")
else:
self.info("Line ends at %u/%u, len %u" %
(addr, self.stream._size, self.current_size))
break
class PDFDictionaryPair(FieldSet):
def createFields(self):
yield PDFName(self, "name", getElementEnd(self))
for field in parsePDFType(self):
yield field
class PDFDictionary(FieldSet):
def createFields(self):
yield String(self, "dict_start", 2)
while not self.eof:
addr = self.absolute_address+self.current_size
if self.stream.readBytes(addr, 2) != '>>':
for field in parsePDFType(self):
yield field
else:
break
yield String(self, "dict_end", 2)
class PDFArray(FieldSet):
"""
Array of possibly non-homogeneous elements, starting with '[' and ending
with ']'
"""
def createFields(self):
yield String(self, "array_start", 1)
while self.stream.readBytes(self.absolute_address+self.current_size, 1) != ']':
for field in parsePDFType(self):
yield field
yield String(self, "array_end", 1)
def parsePDFType(s):
addr = s.absolute_address+s.current_size
char = s.stream.readBytes(addr, 1)
if char == '/':
yield PDFName(s, "type[]", getElementEnd(s))
elif char == '<':
if s.stream.readBytes(addr+8, 1) == '<':
yield PDFDictionary(s, "dict[]")
else:
yield PDFID(s, "id[]")
elif char == '(':
yield PDFString(s, "string[]")
elif char == '[':
yield PDFArray(s, "array[]")
else:
# First parse size
size = getElementEnd(s)
for limit in ['/', '>', '<']:
other_size = getElementEnd(s, limit)
if other_size != None:
other_size -= 1
if size == None or (other_size>0 and other_size < size):
size = other_size
# Get element
name = s.stream.readBytes(addr, size)
char = s.stream.readBytes(addr+8*size+8, 1)
if name.count(' ') > 1 and char == '<':
# Probably a catalog
yield Catalog(s, "catalog[]")
elif name[0] in ('.','-','+', '0', '1', '2', '3', \
'4', '5', '6', '7', '8', '9'):
s.info("Not a catalog: %u spaces and end='%s'" % (name.count(' '), char))
yield PDFNumber(s, "integer[]")
else:
s.info("Trying to parse '%s': %u bytes" % \
(s.stream.readBytes(s.absolute_address+s.current_size, 4), size))
yield String(s, "unknown[]", size)
class Header(FieldSet):
def createFields(self):
yield String(self, "marker", 5, MAGIC)
length = getLineEnd(self, 4)
if length != None:
#self.info("Found at position %08X" % len)
yield String(self, "version", length-1)
yield LineEnd(self, "line_end")
else:
self.warning("Can't determine version!")
def createDescription(self):
return "PDF version %s" % self["version"].display
class Body(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
pos = self.stream.searchBytesLength(CrossReferenceTable.MAGIC, False)
if pos == None:
raise ParserError("Can't find xref starting at %u" %
(self.absolute_address//8))
self._size = 8*pos-self.absolute_address
def createFields(self):
while self.stream.readBytes(self.absolute_address+self.current_size, 1) == '%':
size = getLineEnd(self, 4)
if size == 2:
yield textHandler(UInt16(self, "crc32"), hexadecimal)
elif size == 4:
yield textHandler(UInt32(self, "crc32"), hexadecimal)
elif self.stream.readBytes(self.absolute_address+self.current_size, size).isalpha():
yield String(self, "comment[]", size)
else:
RawBytes(self, "unknown_data[]", size)
yield LineEnd(self, "line_end[]")
#abs_offset = self.current_size//8
# TODO: yield objects that read offsets and deduce size from
# "/cross_ref_table/sub_section[]/entries/item[]"
offsets = []
for subsection in self.array("/cross_ref_table/sub_section"):
for obj in subsection.array("entries/item"):
if "byte_offset" in obj:
# Could be inserted already sorted
offsets.append(obj["byte_offset"].value)
offsets.append(self["/cross_ref_table"].absolute_address//8)
offsets.sort()
for index in xrange(len(offsets)-1):
yield Catalog(self, "object[]", size=offsets[index+1]-offsets[index])
class Entry(FieldSet):
static_size = 20*8
def createFields(self):
typ = self.stream.readBytes(self.absolute_address+17*8, 1)
if typ == 'n':
yield PDFNumber(self, "byte_offset")
elif typ == 'f':
yield PDFNumber(self, "next_free_object_number")
else:
yield PDFNumber(self, "unknown_string")
yield PDFNumber(self, "generation_number")
yield UInt8(self, "type")
yield LineEnd(self, "line_end")
def createDescription(self):
if self["type"].value == 'n':
return "In-use entry at offset %u" % int(self["byte_offset"].value)
elif self["type"].value == 'f':
return "Free entry before in-use object %u" % \
int(self["next_free_object_number"].value)
else:
return "unknown %s" % self["unknown_string"].value
class SubSection(FieldSet):
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, desc)
self.info("Got entry count: '%s'" % self["entry_count"].value)
self._size = self.current_size + 8*20*int(self["entry_count"].value) \
+ self["line_end"].size
def createFields(self):
yield PDFNumber(self, "start_number",
"Object number of first entry in subsection")
self.info("start_number = %i" % self["start_number"].value)
yield PDFNumber(self, "entry_count", "Number of entries in subsection")
self.info("entry_count = %i" % self["entry_count"].value)
yield LineEnd(self, "line_end")
yield GenericVector(self, "entries", int(self["entry_count"].value),
Entry)
#yield LineEnd(self, "line_end[]")
def createDescription(self):
return "Subsection with %s elements, starting at %s" % \
(self["entry_count"].value, self["start_number"])
class CrossReferenceTable(FieldSet):
MAGIC = "xref"
def __init__(self, parent, name, desc=None):
FieldSet.__init__(self, parent, name, description=desc)
pos = self.stream.searchBytesLength(Trailer.MAGIC, False)
if pos == None:
raise ParserError("Can't find '%s' starting at %u" \
(Trailer.MAGIC, self.absolute_address//8))
self._size = 8*pos-self.absolute_address
def createFields(self):
yield RawBytes(self, "marker", len(self.MAGIC))
yield LineEnd(self, "line_end[]")
while not self.eof:
yield SubSection(self, "sub_section[]")
class Catalog(FieldSet):
END_NAME = ['<', '/', '[']
def __init__(self, parent, name, size=None, desc=None):
FieldSet.__init__(self, parent, name, description=desc)
if size != None:
self._size = 8*size
# object catalogs are ended with "obj"
elif self["object"].value == "obj":
size = self.stream.searchBytesLength("endobj", False)
if size != None:
self._size = 8*(size+2)
def createFields(self):
yield PDFNumber(self, "index")
yield PDFNumber(self, "unknown[]")
length = getElementEnd(self)
for limit in self.END_NAME:
new_length = getElementEnd(self, limit)-len(limit)
if length == None or (new_length != None and new_length < length):
length = new_length
yield String(self, "object", length, strip=' ')
if self.stream.readBytes(self.absolute_address+self.current_size, 2) == '<<':
yield PDFDictionary(self, "key_list")
# End of catalog: this one has "endobj"
if self["object"].value == "obj":
yield LineEnd(self, "line_end[]")
yield String(self, "end_object", len("endobj"))
yield LineEnd(self, "line_end[]")
class Trailer(FieldSet):
MAGIC = "trailer"
def createFields(self):
yield RawBytes(self, "marker", len(self.MAGIC))
yield LineEnd(self, "line_end[]")
yield String(self, "start_attribute_marker", 2)
addr = self.absolute_address + self.current_size
while self.stream.readBytes(addr, 2) != '>>':
t = PDFName(self, "type[]")
yield t
name = t.value
self.info("Parsing PDFName '%s'" % name)
if name == "Size":
yield PDFNumber(self, "size", "Entries in the file cross-reference section")
elif name == "Prev":
yield PDFNumber(self, "offset")
elif name == "Root":
yield Catalog(self, "object_catalog")
elif name == "Info":
yield Catalog(self, "info")
elif name == "ID":
yield PDFArray(self, "id")
elif name == "Encrypt":
yield PDFDictionary(self, "decrypt")
else:
raise ParserError("Don't know trailer type '%s'" % name)
addr = self.absolute_address + self.current_size
yield String(self, "end_attribute_marker", 2)
yield LineEnd(self, "line_end[]")
yield String(self, "start_xref", 9)
yield LineEnd(self, "line_end[]")
yield PDFNumber(self, "cross_ref_table_start_address")
yield LineEnd(self, "line_end[]")
yield String(self, "end_marker", len(ENDMAGIC))
yield LineEnd(self, "line_end[]")
class PDFDocument(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "pdf",
"category": "misc",
"file_ext": ("pdf",),
"mime": (u"application/pdf",),
"min_size": (5+4)*8,
"magic": ((MAGIC, 5),),
"description": "Portable Document Format (PDF) document"
}
def validate(self):
if self.stream.readBytes(0, len(MAGIC)) != MAGIC:
return "Invalid magic string"
return True
# Size is not always determined by position of "%%EOF":
# - updated documents have several of those
# - PDF files should be parsed from *end*
# => TODO: find when a document has been updated
def createFields(self):
yield Header(self, "header")
yield Body(self, "body")
yield CrossReferenceTable(self, "cross_ref_table")
yield Trailer(self, "trailer")
|
hpcloud/docker-registry | refs/heads/master | lib/storage/swift.py | 8 |
import cache_lru
import swiftclient
from . import Storage
class SwiftStorage(Storage):
def __init__(self, config):
self._swift_connection = self._create_swift_connection(config)
self._swift_container = config.swift_container
self._root_path = config.get('storage_path', '/')
def _create_swift_connection(self, config):
return swiftclient.client.Connection(
authurl=config.get('swift_authurl'),
user=config.get('swift_user'),
key=config.get('swift_password'),
auth_version=config.get('swift_auth_version', 2),
os_options={
'tenant_name': config.get('swift_tenant_name'),
'region_name': config.get('swift_region_name')
})
def _init_path(self, path=None):
path = self._root_path + '/' + path if path else self._root_path
# Openstack does not like paths starting with '/'
if path and path.startswith('/'):
path = path[1:]
return path
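    # Added illustration (hedged): with storage_path set to '/registry',
    # _init_path('images/layer') returns 'registry/images/layer' and
    # _init_path() returns 'registry'; the leading '/' is dropped because
    # Swift object names are stored without one.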
@cache_lru.get
def get_content(self, path, chunk_size=None):
path = self._init_path(path)
try:
_, obj = self._swift_connection.get_object(
self._swift_container,
path,
resp_chunk_size=chunk_size)
return obj
except Exception:
raise IOError("Could not get content: {}".format(path))
@cache_lru.put
def put_content(self, path, content, chunk=None):
path = self._init_path(path)
try:
self._swift_connection.put_object(self._swift_container,
path,
content,
chunk_size=chunk)
return path
except Exception:
raise IOError("Could not put content: {}".format(path))
def stream_read(self, path, bytes_range=None):
try:
for buf in self.get_content(path, self.buffer_size):
yield buf
except Exception:
raise OSError(
"Could not read content from stream: {}".format(path))
def stream_write(self, path, fp):
self.put_content(path, fp, self.buffer_size)
def list_directory(self, path=None):
try:
path = self._init_path(path)
if path and not path.endswith('/'):
path += '/'
_, directory = self._swift_connection.get_container(
container=self._swift_container,
path=path)
if not directory:
raise
for inode in directory:
# trim extra trailing slashes
if inode['name'].endswith('/'):
inode['name'] = inode['name'][:-1]
yield inode['name'].replace(self._root_path[1:] + '/', '', 1)
except Exception:
raise OSError("No such directory: {}".format(path))
def exists(self, path):
try:
self.get_content(path)
return True
except Exception:
return False
@cache_lru.remove
def remove(self, path):
path = self._init_path(path)
try:
self._swift_connection.delete_object(self._swift_container, path)
except Exception:
pass
def get_size(self, path):
try:
return len(self.get_content(path))
except Exception:
raise OSError("Could not get file size: {}".format(path))
|
PongPi/isl-odoo | refs/heads/8.0 | addons/document/content_index.py | 430 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
from subprocess import Popen, PIPE
_logger = logging.getLogger(__name__)
class NhException(Exception):
pass
class indexer(object):
""" An indexer knows how to parse the content of some file.
Typically, one indexer should be instantiated per file
type.
Override this class to add more functionality. Note that
        you only need to override whichever of the Content or File
        methods gives the best result. """
def _getMimeTypes(self):
""" Return supported mimetypes """
return []
def _getExtensions(self):
return []
def _getDefMime(self, ext):
""" Return a mimetype for this document type, ideally the
closest to the extension ext. """
        mts = self._getMimeTypes()
if len (mts):
return mts[0]
return None
def indexContent(self, content, filename=None, realfile=None):
""" Use either content or the real file, to index.
Some parsers will work better with the actual
content, others parse a file easier. Try the
optimal.
"""
res = ''
try:
if content != None:
return self._doIndexContent(content)
except NhException:
pass
if realfile != None:
try:
return self._doIndexFile(realfile)
except NhException:
pass
fp = open(realfile,'rb')
try:
content2 = fp.read()
finally:
fp.close()
            # An unhandled exception may be raised here
return self._doIndexContent(content2)
# last try, with a tmp file
if content:
try:
fname,ext = filename and os.path.splitext(filename) or ('','')
fd, rfname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
res = self._doIndexFile(rfname)
os.unlink(rfname)
return res
except NhException:
pass
raise NhException('No appropriate method to index file.')
def _doIndexContent(self, content):
raise NhException("Content cannot be handled here.")
def _doIndexFile(self, fpath):
raise NhException("Content cannot be handled here.")
def __repr__(self):
return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__)
def mime_match(mime, mdict):
if mdict.has_key(mime):
return (mime, mdict[mime])
if '/' in mime:
mpat = mime.split('/')[0]+'/*'
if mdict.has_key(mpat):
return (mime, mdict[mpat])
return (None, None)
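# Hedged example (added): mime_match() returns the exact entry when present and
# otherwise falls back to a "type/*" wildcard entry. The placeholder indexer
# objects below are made up purely for illustration (Python 2, matching the
# has_key() usage above).
if __name__ == '__main__':
    _pdf, _any_text = object(), object()
    _table = {'application/pdf': _pdf, 'text/*': _any_text}
    assert mime_match('application/pdf', _table) == ('application/pdf', _pdf)
    assert mime_match('text/x-python', _table) == ('text/x-python', _any_text)
    assert mime_match('image/png', _table) == (None, None)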
class contentIndex(object):
def __init__(self):
self.mimes = {}
self.exts = {}
def register(self, obj):
f = False
for mime in obj._getMimeTypes():
self.mimes[mime] = obj
f = True
for ext in obj._getExtensions():
self.exts[ext] = obj
f = True
if f:
_logger.debug('Register content indexer: %r.', obj)
if not f:
raise Exception("Your indexer should at least support a mimetype or extension.")
def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False):
fobj = None
fname = None
mime = None
if content_type and self.mimes.has_key(content_type):
mime = content_type
fobj = self.mimes[content_type]
elif filename:
bname,ext = os.path.splitext(filename)
if self.exts.has_key(ext):
fobj = self.exts[ext]
mime = fobj._getDefMime(ext)
if content_type and not fobj:
mime,fobj = mime_match(content_type, self.mimes)
if not fobj:
try:
if realfname :
fname = realfname
else:
try:
bname,ext = os.path.splitext(filename or 'test.tmp')
except Exception:
bname, ext = filename, 'tmp'
fd, fname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE)
(result, _) = pop.communicate()
mime2 = result.split(';')[0]
_logger.debug('File gives us: %s', mime2)
# Note that the temporary file still exists now.
mime,fobj = mime_match(mime2, self.mimes)
if not mime:
mime = mime2
except Exception:
_logger.exception('Cannot determine mime type.')
try:
if fobj:
res = (mime, fobj.indexContent(content,filename,fname or realfname) )
else:
_logger.debug("Have no object, return (%s, None).", mime)
res = (mime, '')
except Exception:
_logger.exception("Cannot index file %s (%s).",
filename, fname or realfname)
res = (mime, '')
# If we created a tmp file, unlink it now
if not realfname and fname:
try:
os.unlink(fname)
except Exception:
_logger.exception("Cannot unlink %s.", fname)
return res
cntIndex = contentIndex()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
structRecomputation/computations | refs/heads/master | _modules/auxiliary_monte.py | 1 | import numpy as np
def write_monte(spec, rslt):
""" Write out results to table.
"""
num_evals = rslt['num_evals']
num_steps = rslt['num_steps']
points = rslt['points']
stat = rslt['rmse']
# Write out information.
fname = 'table_4.' + str(spec + 1) + '.txt'
with open(fname, 'w') as f:
# Write out the heading of the table.
args = list()
args += ['Identifier', 'True Value', 'Mean', 'Bias', 't-Stat.', 'Std.']
fmt_ = ' {:>15}' + ' {:>15}' * 5 + '\n\n'
line = fmt_.format(*args)
f.write(line)
fmt_ = ' {:>15}' + ' {:15.4f}' * 5 + '\n'
for i in range(26):
args = list()
args += [str(i)]
args += [points['true'][i]]
args += [points['mean'][i]]
args += [points['bias'][i]]
args += [points['stat'][i]]
args += [points['stan'][i]]
line = fmt_.format(*args)
f.write(line)
string = '\n\n\n {0[0]:>15} {0[1]:>15} {0[2]:>15}\n\n'
f.write(string.format(['RMSE', 'Evaluations', 'Steps']))
string = ' {:15.4f} {:>15} {:>15}'
f.write(string.format(*[stat, int(num_evals), int(num_steps)]))
def process_monte(x_iter, x_true):
""" Process results from to bootstrap iterations to fill the table with
the required information.
"""
# Initialize dictionary as the results container.
rslt = dict()
for key_ in ['mean', 'true', 'bias', 'stan', 'stat', 'msta']:
rslt[key_] = [None] * 26
# Attach the results from each individual bootstrap run to the dictionary.
rslt['x_iter'] = np.array(x_iter, ndmin=2)
# Construct auxiliary objects
rslt['mean'] = np.mean(rslt['x_iter'], axis=0)
num_boots = len(rslt['x_iter'])
# Construct the requested information.
for i in range(26):
# true parameter and bias
rslt['true'][i] = x_true[i]
rslt['bias'][i] = rslt['mean'][i] - rslt['true'][i]
# standard deviation
rslt['stan'][i] = 0.0
for j in range(num_boots):
rslt['stan'][i] += (rslt['x_iter'][j, i] - rslt['mean'][i]) ** 2
try:
rslt['stan'][i] = np.sqrt((1.0 / (num_boots - 1)) * rslt['stan'][i])
except ZeroDivisionError:
rslt['stan'][i] = 0.0
# t-statistic
if rslt['stan'][i] == 0.0:
rslt['stat'][i] = 0.0
else:
rslt['stat'][i] = ((rslt['mean'][i] - x_true[i]) / rslt['stan'][i])
rslt['stat'][i] = rslt['stat'][i] * np.sqrt(num_boots)
return rslt
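# Hedged usage sketch (added): process_monte() expects one row per bootstrap
# replication and exactly 26 parameter values per row; the numbers below are
# made up purely to show the expected shapes of the inputs and outputs.
if __name__ == '__main__':
    true_values = np.linspace(0.0, 2.5, 26)
    replications = [true_values + 0.01, true_values - 0.01]
    summary = process_monte(replications, true_values)
    print(summary['bias'][:3])
    print(summary['stan'][:3])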
|
emadehsan/ieighteen | refs/heads/master | translate.py | 1 |
'''
@author Emad Ehsan
1. Picks up a line from sourceLang file, e.g. './en/strings.txt'
2. Requests Google to translate the query
3. Creates a file for targetLang, e.g. './ur/strings.txt'
4. Places the translation at the corresponding line number in the targetLang file
'''
from html.parser import HTMLParser
import requests
from requests.utils import quote
import binascii
counter = 1
# Locale code of source and target languages
sourceLang = 'en'
targetLang = 'ur'
# Put file name here
filename = 'strings.txt'
'''
For each line in the sourceLang file, call getTranslation
and put the translated line into the targetLang file
'''
def translate(infile, outfile):
with open(infile) as fin:
with open(outfile, 'wb') as fout:
for line in fin:
outline = bytes(line, 'utf-8')
# print(line)
line = line.strip()
if len(line) == 0:
continue
# now line is prepared to be translated
translation = getTranslation(line)
# add new line at the end
outline = translation + bytes('\n', 'utf-8')
# save
fout.write(outline)
'''
Translates via google translate as described here
https://ctrlq.org/code/19909-google-translate-api
'''
def getTranslation(sentence):
global counter, sourceLang, targetLang
url = "https://translate.googleapis.com/translate_a/single?client=gtx&sl=" + sourceLang
url = url + "&tl=" + targetLang + "&dt=t&q=" + quote(sentence);
print('Request# ' + str(counter) + ': ' + url)
counter += 1
page = requests.get(url)
# strip the response to extract urdu text along with quotes
translation = page.content
translation = translation[3:]
removeLast = 16 + len(sentence)
translation = translation[:-removeLast]
# still has a trailing comma
    if translation.endswith(b','):
translation = translation[:-1]
return translation
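# Added note (hedged): for a sample call the constructed request looks like the
# URL below -- quote() percent-encodes the sentence and sl/tl carry the
# sourceLang/targetLang locale codes defined above.
#   getTranslation('Hello world') requests:
#   https://translate.googleapis.com/translate_a/single?client=gtx&sl=en&tl=ur&dt=t&q=Hello%20world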
def main():
global filename
infile = './' + sourceLang + '/' + filename
outfile = './' + targetLang + '/' + filename
translate(infile, outfile)
main()
|
bytor99999/vertx-web | refs/heads/master | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/tests/test_resources.py | 345 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove
import os
import sys
import tempfile
import shutil
from unittest import TestCase
import pkg_resources
from pkg_resources import (parse_requirements, VersionConflict, parse_version,
Distribution, EntryPoint, Requirement, safe_version, safe_name,
WorkingSet)
from setuptools.command.easy_install import (get_script_header, is_sh,
nt_quote_arg)
from setuptools.compat import StringIO, iteritems
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
def safe_repr(obj, short=False):
""" copied from Python2.7"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < pkg_resources._MAX_LENGTH:
return result
return result[:pkg_resources._MAX_LENGTH] + ' [truncated]...'
class Metadata(pkg_resources.EmptyProvider):
"""Mock object to return metadata as if from an on-disk distribution"""
def __init__(self,*pairs):
self.metadata = dict(pairs)
def has_metadata(self,name):
return name in self.metadata
def get_metadata(self,name):
return self.metadata[name]
def get_metadata_lines(self,name):
return pkg_resources.yield_lines(self.get_metadata(name))
dist_from_fn = pkg_resources.Distribution.from_filename
class DistroTests(TestCase):
def testCollection(self):
# empty path should produce no distributions
ad = pkg_resources.Environment([], platform=None, python=None)
self.assertEqual(list(ad), [])
self.assertEqual(ad['FooPkg'],[])
ad.add(dist_from_fn("FooPkg-1.3_1.egg"))
ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg"))
ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg"))
# Name is in there now
self.assertTrue(ad['FooPkg'])
# But only 1 package
self.assertEqual(list(ad), ['foopkg'])
# Distributions sort by version
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2']
)
# Removing a distribution leaves sequence alone
ad.remove(ad['FooPkg'][1])
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.2']
)
# And inserting adds them in order
ad.add(dist_from_fn("FooPkg-1.9.egg"))
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2']
)
ws = WorkingSet([])
foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg")
foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg")
req, = parse_requirements("FooPkg>=1.3")
# Nominal case: no distros on path, should yield all applicable
self.assertEqual(ad.best_match(req,ws).version, '1.9')
# If a matching distro is already installed, should return only that
ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
# If the first matching distro is unsuitable, it's a version conflict
ws = WorkingSet([])
ws.add(foo12)
ws.add(foo14)
self.assertRaises(VersionConflict, ad.best_match, req, ws)
# If more than one match on the path, the first one takes precedence
ws = WorkingSet([])
ws.add(foo14)
ws.add(foo12)
ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
def checkFooPkg(self,d):
self.assertEqual(d.project_name, "FooPkg")
self.assertEqual(d.key, "foopkg")
self.assertEqual(d.version, "1.3-1")
self.assertEqual(d.py_version, "2.4")
self.assertEqual(d.platform, "win32")
self.assertEqual(d.parsed_version, parse_version("1.3-1"))
def testDistroBasics(self):
d = Distribution(
"/some/path",
project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
)
self.checkFooPkg(d)
d = Distribution("/some/path")
self.assertEqual(d.py_version, sys.version[:3])
self.assertEqual(d.platform, None)
def testDistroParse(self):
d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg")
self.checkFooPkg(d)
d = dist_from_fn("FooPkg-1.3_1-py2.4-win32.egg-info")
self.checkFooPkg(d)
def testDistroMetadata(self):
d = Distribution(
"/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
metadata = Metadata(
('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
)
)
self.checkFooPkg(d)
def distRequires(self, txt):
return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))
def checkRequires(self, dist, txt, extras=()):
self.assertEqual(
list(dist.requires(extras)),
list(parse_requirements(txt))
)
def testDistroDependsSimple(self):
for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
self.checkRequires(self.distRequires(v), v)
def testResolve(self):
ad = pkg_resources.Environment([])
ws = WorkingSet([])
# Resolving no requirements -> nothing to install
self.assertEqual(list(ws.resolve([],ad)), [])
# Request something not in the collection -> DistributionNotFound
self.assertRaises(
pkg_resources.DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
)
Foo = Distribution.from_filename(
"/foo_dir/Foo-1.2.egg",
metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
)
ad.add(Foo)
ad.add(Distribution.from_filename("Foo-0.9.egg"))
# Request thing(s) that are available -> list to activate
for i in range(3):
targets = list(ws.resolve(parse_requirements("Foo"), ad))
self.assertEqual(targets, [Foo])
list(map(ws.add,targets))
self.assertRaises(VersionConflict, ws.resolve,
parse_requirements("Foo==0.9"), ad)
ws = WorkingSet([]) # reset
# Request an extra that causes an unresolved dependency for "Baz"
self.assertRaises(
pkg_resources.DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad
)
Baz = Distribution.from_filename(
"/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
)
ad.add(Baz)
# Activation list now includes resolved dependency
self.assertEqual(
list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz]
)
# Requests for conflicting versions produce VersionConflict
self.assertRaises(VersionConflict,
ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad)
def testDistroDependsOptions(self):
d = self.distRequires("""
Twisted>=1.5
[docgen]
ZConfig>=2.0
docutils>=0.3
[fastcgi]
fcgiapp>=0.1""")
self.checkRequires(d,"Twisted>=1.5")
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
["docgen","fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
["fastcgi", "docgen"]
)
self.assertRaises(pkg_resources.UnknownExtra, d.requires, ["foo"])
class EntryPointTests(TestCase):
def assertfields(self, ep):
self.assertEqual(ep.name,"foo")
self.assertEqual(ep.module_name,"setuptools.tests.test_resources")
self.assertEqual(ep.attrs, ("EntryPointTests",))
self.assertEqual(ep.extras, ("x",))
self.assertTrue(ep.load() is EntryPointTests)
self.assertEqual(
str(ep),
"foo = setuptools.tests.test_resources:EntryPointTests [x]"
)
def setUp(self):
self.dist = Distribution.from_filename(
"FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))
def testBasics(self):
ep = EntryPoint(
"foo", "setuptools.tests.test_resources", ["EntryPointTests"],
["x"], self.dist
)
self.assertfields(ep)
def testParse(self):
s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
ep = EntryPoint.parse(s, self.dist)
self.assertfields(ep)
ep = EntryPoint.parse("bar baz= spammity[PING]")
self.assertEqual(ep.name,"bar baz")
self.assertEqual(ep.module_name,"spammity")
self.assertEqual(ep.attrs, ())
self.assertEqual(ep.extras, ("ping",))
ep = EntryPoint.parse(" fizzly = wocka:foo")
self.assertEqual(ep.name,"fizzly")
self.assertEqual(ep.module_name,"wocka")
self.assertEqual(ep.attrs, ("foo",))
self.assertEqual(ep.extras, ())
def testRejects(self):
for ep in [
"foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
]:
try: EntryPoint.parse(ep)
except ValueError: pass
else: raise AssertionError("Should've been bad", ep)
def checkSubMap(self, m):
self.assertEqual(len(m), len(self.submap_expect))
for key, ep in iteritems(self.submap_expect):
self.assertEqual(repr(m.get(key)), repr(ep))
submap_expect = dict(
feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']),
feature3=EntryPoint('feature3', 'this.module', extras=['something'])
)
submap_str = """
# define features for blah blah
feature1 = somemodule:somefunction
feature2 = another.module:SomeClass [extra1,extra2]
feature3 = this.module [something]
"""
def testParseList(self):
self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
self.assertRaises(ValueError, EntryPoint.parse_group, "x",
["foo=baz", "foo=bar"])
def testParseMap(self):
m = EntryPoint.parse_map({'xyz':self.submap_str})
self.checkSubMap(m['xyz'])
self.assertEqual(list(m.keys()),['xyz'])
m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
self.checkSubMap(m['xyz'])
self.assertEqual(list(m.keys()),['xyz'])
self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)
class RequirementsTests(TestCase):
def testBasics(self):
r = Requirement.parse("Twisted>=1.2")
self.assertEqual(str(r),"Twisted>=1.2")
self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))
def testOrdering(self):
r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
self.assertEqual(r1,r2)
self.assertEqual(str(r1),str(r2))
self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2")
def testBasicContains(self):
r = Requirement("Twisted", [('>=','1.2')], ())
foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
twist11 = Distribution.from_filename("Twisted-1.1.egg")
twist12 = Distribution.from_filename("Twisted-1.2.egg")
self.assertTrue(parse_version('1.2') in r)
self.assertTrue(parse_version('1.1') not in r)
self.assertTrue('1.2' in r)
self.assertTrue('1.1' not in r)
self.assertTrue(foo_dist not in r)
self.assertTrue(twist11 not in r)
self.assertTrue(twist12 in r)
def testAdvancedContains(self):
r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'):
self.assertTrue(v in r, (v,r))
for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'):
self.assertTrue(v not in r, (v,r))
def testOptionsAndHashing(self):
r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
self.assertEqual(r1,r2)
self.assertEqual(r1,r3)
self.assertEqual(r1.extras, ("foo","bar"))
self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized
self.assertEqual(hash(r1), hash(r2))
self.assertEqual(
hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
frozenset(["foo","bar"])))
)
def testVersionEquality(self):
r1 = Requirement.parse("foo==0.3a2")
r2 = Requirement.parse("foo!=0.3a4")
d = Distribution.from_filename
self.assertTrue(d("foo-0.3a4.egg") not in r1)
self.assertTrue(d("foo-0.3a1.egg") not in r1)
self.assertTrue(d("foo-0.3a4.egg") not in r2)
self.assertTrue(d("foo-0.3a2.egg") in r1)
self.assertTrue(d("foo-0.3a2.egg") in r2)
self.assertTrue(d("foo-0.3a3.egg") in r2)
self.assertTrue(d("foo-0.3a5.egg") in r2)
def testSetuptoolsProjectName(self):
"""
The setuptools project should implement the setuptools package.
"""
self.assertEqual(
Requirement.parse('setuptools').project_name, 'setuptools')
# setuptools 0.7 and higher means setuptools.
self.assertEqual(
Requirement.parse('setuptools == 0.7').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools >= 0.7').project_name, 'setuptools')
class ParseTests(TestCase):
def testEmptyParse(self):
self.assertEqual(list(parse_requirements('')), [])
def testYielding(self):
for inp,out in [
([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
(['x\n\n','y'], ['x','y']),
]:
self.assertEqual(list(pkg_resources.yield_lines(inp)),out)
def testSplitting(self):
sample = """
x
[Y]
z
a
[b ]
# foo
c
[ d]
[q]
v
"""
self.assertEqual(list(pkg_resources.split_sections(sample)),
[(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])]
)
self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo"))
def testSafeName(self):
self.assertEqual(safe_name("adns-python"), "adns-python")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
self.assertNotEqual(safe_name("peak.web"), "peak-web")
def testSafeVersion(self):
self.assertEqual(safe_version("1.2-1"), "1.2-1")
self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
self.assertEqual(safe_version("peak.web"), "peak.web")
def testSimpleRequirements(self):
self.assertEqual(
list(parse_requirements('Twis-Ted>=1.2-1')),
[Requirement('Twis-Ted',[('>=','1.2-1')], ())]
)
self.assertEqual(
list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
[Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
)
self.assertEqual(
Requirement.parse("FooBar==1.99a3"),
Requirement("FooBar", [('==','1.99a3')], ())
)
self.assertRaises(ValueError,Requirement.parse,">=2.3")
self.assertRaises(ValueError,Requirement.parse,"x\\")
self.assertRaises(ValueError,Requirement.parse,"x==2 q")
self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
self.assertRaises(ValueError,Requirement.parse,"#")
def testVersionEquality(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertEqual(p1,p2, (s1,s2,p1,p2))
c('1.2-rc1', '1.2rc1')
c('0.4', '0.4.0')
c('0.4.0.0', '0.4.0')
c('0.4.0-0', '0.4-0')
c('0pl1', '0.0pl1')
c('0pre1', '0.0c1')
c('0.0.0preview1', '0c1')
c('0.0c1', '0-rc1')
c('1.2a1', '1.2.a.1')
c('1.2...a', '1.2a')
def testVersionOrdering(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertTrue(p1<p2, (s1,s2,p1,p2))
c('2.1','2.1.1')
c('2a1','2b0')
c('2a1','2.1')
c('2.3a1', '2.3')
c('2.1-1', '2.1-2')
c('2.1-1', '2.1.1')
c('2.1', '2.1pl4')
c('2.1a0-20040501', '2.1')
c('1.1', '02.1')
c('A56','B27')
c('3.2', '3.2.pl0')
c('3.2-1', '3.2pl1')
c('3.2pl1', '3.2pl1-1')
c('0.4', '4.0')
c('0.0.4', '0.4.0')
c('0pl1', '0.4pl1')
c('2.1.0-rc1','2.1.0')
c('2.1dev','2.1a0')
torture ="""
0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
0.77.2-1 0.77.1-1 0.77.0-1
""".split()
for p,v1 in enumerate(torture):
for v2 in torture[p+1:]:
c(v2,v1)
class ScriptHeaderTests(TestCase):
non_ascii_exe = '/Users/José/bin/python'
exe_with_spaces = r'C:\Program Files\Python33\python.exe'
def test_get_script_header(self):
if not sys.platform.startswith('java') or not is_sh(sys.executable):
# This test is for non-Jython platforms
expected = '#!%s\n' % nt_quote_arg(os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/local/bin/python'),
expected)
expected = '#!%s -x\n' % nt_quote_arg(os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/bin/python -x'),
expected)
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
candidate = get_script_header('#!/usr/bin/python',
executable=self.exe_with_spaces)
self.assertEqual(candidate, '#!"%s"\n' % self.exe_with_spaces)
def test_get_script_header_jython_workaround(self):
# This test doesn't work with Python 3 in some locales
if (sys.version_info >= (3,) and os.environ.get("LC_CTYPE")
in (None, "C", "POSIX")):
return
class java:
class lang:
class System:
@staticmethod
def getProperty(property):
return ""
sys.modules["java"] = java
platform = sys.platform
sys.platform = 'java1.5.0_13'
stdout, stderr = sys.stdout, sys.stderr
try:
# A mock sys.executable that uses a shebang line (this file)
exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
self.assertEqual(
get_script_header('#!/usr/local/bin/python', executable=exe),
'#!/usr/bin/env %s\n' % exe)
# Ensure we generate what is basically a broken shebang line
# when there's options, with a warning emitted
sys.stdout = sys.stderr = StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python -x',
executable=exe),
'#!%s -x\n' % exe)
self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
sys.stdout = sys.stderr = StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
finally:
del sys.modules["java"]
sys.platform = platform
sys.stdout, sys.stderr = stdout, stderr
class NamespaceTests(TestCase):
def setUp(self):
self._ns_pkgs = pkg_resources._namespace_packages.copy()
self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-")
os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
self._prev_sys_path = sys.path[:]
sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))
def tearDown(self):
shutil.rmtree(self._tmpdir)
pkg_resources._namespace_packages = self._ns_pkgs.copy()
sys.path = self._prev_sys_path[:]
def _assertIn(self, member, container):
""" assertIn and assertTrue does not exist in Python2.3"""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
            self.fail(standardMsg)
def test_two_levels_deep(self):
"""
Test nested namespace packages
Create namespace packages in the following tree :
site-packages-1/pkg1/pkg2
site-packages-2/pkg1/pkg2
Check both are in the _namespace_packages dict and that their __path__
is correct
"""
sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
for site in ["site-pkgs", "site-pkgs2"]:
pkg1_init = open(os.path.join(self._tmpdir, site,
"pkg1", "__init__.py"), "w")
pkg1_init.write(ns_str)
pkg1_init.close()
pkg2_init = open(os.path.join(self._tmpdir, site,
"pkg1", "pkg2", "__init__.py"), "w")
pkg2_init.write(ns_str)
pkg2_init.close()
import pkg1
self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
try:
import pkg1.pkg2
except ImportError:
self.fail("Setuptools tried to import the parent namespace package")
# check the _namespace_packages dict
self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"])
# check the __path__ attribute contains both paths
self.assertEqual(pkg1.pkg2.__path__, [
os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")])
|
edisonlz/fruit | refs/heads/master | web_project/base/site-packages/django/db/backends/sqlite3/introspection.py | 114 | import re
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
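# Added examples (hedged): get_field_size('varchar(11)') -> 11,
# get_field_size('char(25)') -> 25, get_field_size('integer') -> None. Only the
# lowercase "varchar(N)"/"char(N)" shapes are recognised; a None result makes
# FlexibleFieldLookupDict below raise KeyError for the type.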
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND NOT name='sqlite_sequence'
ORDER BY name""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li+1:ri]
for other_index, other_desc in enumerate(other_table_results.split(',')):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
name = other_desc.split(' ', 1)[0].strip('"')
if name == column:
relations[field_index] = (other_index, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple([s.strip('"') for s in m.groups()]))
return key_columns
def get_indexes(self, cursor, table_name):
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': False,
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(')+1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY$', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
|
Edu-Glez/Bank_sentiment_analysis | refs/heads/master | env/lib/python3.6/site-packages/pip/_vendor/webencodings/x_user_defined.py | 341 | # coding: utf8
"""
webencodings.x_user_defined
~~~~~~~~~~~~~~~~~~~~~~~~~~~
An implementation of the x-user-defined encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
codec_info = codecs.CodecInfo(
name='x-user-defined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
# Python 3:
# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700))
decoding_table = (
'\x00'
'\x01'
'\x02'
'\x03'
'\x04'
'\x05'
'\x06'
'\x07'
'\x08'
'\t'
'\n'
'\x0b'
'\x0c'
'\r'
'\x0e'
'\x0f'
'\x10'
'\x11'
'\x12'
'\x13'
'\x14'
'\x15'
'\x16'
'\x17'
'\x18'
'\x19'
'\x1a'
'\x1b'
'\x1c'
'\x1d'
'\x1e'
'\x1f'
' '
'!'
'"'
'#'
'$'
'%'
'&'
"'"
'('
')'
'*'
'+'
','
'-'
'.'
'/'
'0'
'1'
'2'
'3'
'4'
'5'
'6'
'7'
'8'
'9'
':'
';'
'<'
'='
'>'
'?'
'@'
'A'
'B'
'C'
'D'
'E'
'F'
'G'
'H'
'I'
'J'
'K'
'L'
'M'
'N'
'O'
'P'
'Q'
'R'
'S'
'T'
'U'
'V'
'W'
'X'
'Y'
'Z'
'['
'\\'
']'
'^'
'_'
'`'
'a'
'b'
'c'
'd'
'e'
'f'
'g'
'h'
'i'
'j'
'k'
'l'
'm'
'n'
'o'
'p'
'q'
'r'
's'
't'
'u'
'v'
'w'
'x'
'y'
'z'
'{'
'|'
'}'
'~'
'\x7f'
'\uf780'
'\uf781'
'\uf782'
'\uf783'
'\uf784'
'\uf785'
'\uf786'
'\uf787'
'\uf788'
'\uf789'
'\uf78a'
'\uf78b'
'\uf78c'
'\uf78d'
'\uf78e'
'\uf78f'
'\uf790'
'\uf791'
'\uf792'
'\uf793'
'\uf794'
'\uf795'
'\uf796'
'\uf797'
'\uf798'
'\uf799'
'\uf79a'
'\uf79b'
'\uf79c'
'\uf79d'
'\uf79e'
'\uf79f'
'\uf7a0'
'\uf7a1'
'\uf7a2'
'\uf7a3'
'\uf7a4'
'\uf7a5'
'\uf7a6'
'\uf7a7'
'\uf7a8'
'\uf7a9'
'\uf7aa'
'\uf7ab'
'\uf7ac'
'\uf7ad'
'\uf7ae'
'\uf7af'
'\uf7b0'
'\uf7b1'
'\uf7b2'
'\uf7b3'
'\uf7b4'
'\uf7b5'
'\uf7b6'
'\uf7b7'
'\uf7b8'
'\uf7b9'
'\uf7ba'
'\uf7bb'
'\uf7bc'
'\uf7bd'
'\uf7be'
'\uf7bf'
'\uf7c0'
'\uf7c1'
'\uf7c2'
'\uf7c3'
'\uf7c4'
'\uf7c5'
'\uf7c6'
'\uf7c7'
'\uf7c8'
'\uf7c9'
'\uf7ca'
'\uf7cb'
'\uf7cc'
'\uf7cd'
'\uf7ce'
'\uf7cf'
'\uf7d0'
'\uf7d1'
'\uf7d2'
'\uf7d3'
'\uf7d4'
'\uf7d5'
'\uf7d6'
'\uf7d7'
'\uf7d8'
'\uf7d9'
'\uf7da'
'\uf7db'
'\uf7dc'
'\uf7dd'
'\uf7de'
'\uf7df'
'\uf7e0'
'\uf7e1'
'\uf7e2'
'\uf7e3'
'\uf7e4'
'\uf7e5'
'\uf7e6'
'\uf7e7'
'\uf7e8'
'\uf7e9'
'\uf7ea'
'\uf7eb'
'\uf7ec'
'\uf7ed'
'\uf7ee'
'\uf7ef'
'\uf7f0'
'\uf7f1'
'\uf7f2'
'\uf7f3'
'\uf7f4'
'\uf7f5'
'\uf7f6'
'\uf7f7'
'\uf7f8'
'\uf7f9'
'\uf7fa'
'\uf7fb'
'\uf7fc'
'\uf7fd'
'\uf7fe'
'\uf7ff'
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
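# Hedged usage sketch (added): the codec maps bytes 0x80-0xFF to the Private
# Use Area code points U+F780-U+F7FF and back, while ASCII bytes pass through
# unchanged; codec registration is assumed to happen elsewhere in the package,
# so the Codec class is exercised directly here.
if __name__ == '__main__':
    assert Codec().decode(b'a\x80')[0] == 'a\uf780'
    assert Codec().encode('a\uf780')[0] == b'a\x80'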
|
shhui/nova | refs/heads/master | nova/tests/scheduler/test_filter_scheduler.py | 2 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import contextlib
import mock
import uuid
import mox
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group as instance_group_obj
from nova.pci import pci_request
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import utils as scheduler_utils
from nova.scheduler import weights
from nova.tests import fake_instance
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
def fake_get_filtered_hosts(hosts, filter_properties, index):
return list(hosts)
def fake_get_group_filtered_hosts(hosts, filter_properties, index):
group_hosts = filter_properties.get('group_hosts') or []
if group_hosts:
hosts = list(hosts)
hosts.pop(0)
return hosts
else:
return list(hosts)
def fake_get_group_filtered_affinity_hosts(hosts, filter_properties, index):
group_hosts = filter_properties.get('group_hosts') or []
if group_hosts:
affinity_host = hosts.pop(0)
return [affinity_host]
else:
return list(hosts)
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
def test_run_instance_no_hosts(self):
def _fake_empty_call_zone_method(*args, **kwargs):
return []
sched = fakes.FakeFilterScheduler()
uuid = 'fake-uuid1'
fake_context = context.RequestContext('user', 'project')
instance_properties = {'project_id': 1, 'os_type': 'Linux'}
request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
'ephemeral_gb': 0},
'instance_properties': instance_properties,
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
old_ref, new_ref = db.instance_update_and_get_original(fake_context,
uuid, {'vm_state': vm_states.ERROR, 'task_state':
None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None,
None, None, {}, False)
def test_run_instance_non_admin(self):
self.was_admin = False
def fake_get(context, *args, **kwargs):
# make sure this is called with admin context, even though
# we're using user context below
self.was_admin = context.is_admin
return {}
sched = fakes.FakeFilterScheduler()
self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
fake_context = context.RequestContext('user', 'project')
uuid = 'fake-uuid1'
instance_properties = {'project_id': 1, 'os_type': 'Linux'}
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': instance_properties,
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
old_ref, new_ref = db.instance_update_and_get_original(fake_context,
uuid, {'vm_state': vm_states.ERROR, 'task_state':
None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {}, False)
self.assertTrue(self.was_admin)
def test_scheduler_includes_launch_index(self):
fake_context = context.RequestContext('user', 'project')
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
def _has_launch_index(expected_index):
"""Return a function that verifies the expected index."""
def _check_launch_index(value):
if 'instance_properties' in value:
if 'launch_index' in value['instance_properties']:
index = value['instance_properties']['launch_index']
if index == expected_index:
return True
return False
return _check_launch_index
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, '_provision_resource')
self.driver._schedule(fake_context, request_spec, {},
['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
fake_context, 'host1',
mox.Func(_has_launch_index(0)), {},
None, None, None, None,
instance_uuid='fake-uuid1',
legacy_bdm_in_spec=False).AndReturn(instance1)
# instance 2
self.driver._provision_resource(
fake_context, 'host2',
mox.Func(_has_launch_index(1)), {},
None, None, None, None,
instance_uuid='fake-uuid2',
legacy_bdm_in_spec=False).AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(fake_context, request_spec,
None, None, None, None, {}, False)
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through.
"""
self.next_weight = 1.0
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'num_instances': 10,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0,
'vcpus': 1},
'instance_properties': {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}}
self.mox.ReplayAll()
weighed_hosts = sched._schedule(fake_context, request_spec, {})
self.assertEqual(len(weighed_hosts), 10)
for weighed_host in weighed_hosts:
self.assertIsNotNone(weighed_host.obj)
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)
sched = fakes.FakeFilterScheduler()
self.assertEqual(4, sched._max_attempts())
def test_invalid_max_attempts(self):
self.flags(scheduler_max_attempts=0)
sched = fakes.FakeFilterScheduler()
self.assertRaises(exception.NovaException, sched._max_attempts)
def test_retry_disabled(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertNotIn("retry", filter_properties)
def test_retry_force_hosts(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties)
filter_properties = dict(force_hosts=['force_host'])
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertNotIn("retry", filter_properties)
def test_retry_force_nodes(self):
# Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties)
filter_properties = dict(force_nodes=['force_node'])
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertNotIn("retry", filter_properties)
def test_retry_attempt_one(self):
# Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
# Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
# Test for necessary explosion when max retries is exceeded and that
# the information needed in request_spec is still present for error
# handling
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {'project_id': '12345', 'os_type': 'Linux'}
instance_uuids = ['fake-id']
request_spec = dict(instance_properties=instance_properties,
instance_uuids=instance_uuids)
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
self.context, request_spec, admin_password=None,
injected_files=None, requested_networks=None,
is_first_time=False,
filter_properties=filter_properties,
legacy_bdm_in_spec=False)
uuids = request_spec.get('instance_uuids')
self.assertEqual(uuids, instance_uuids)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
node = "fakenode"
scheduler_utils._add_retry_host(filter_properties, host, node)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
self.assertEqual({'vcpus': 5}, host_state.limits)
def test_group_details_in_filter_properties(self):
sched = fakes.FakeFilterScheduler()
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = instance_group_obj.InstanceGroup()
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = ['anti-affinity']
filter_properties = {
'scheduler_hints': {
'group': group.uuid,
},
'group_hosts': ['hostB'],
}
with contextlib.nested(
mock.patch.object(instance_group_obj.InstanceGroup, 'get_by_uuid',
return_value=group),
mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
update_group_hosts = sched._setup_instance_group(self.context,
filter_properties)
self.assertTrue(update_group_hosts)
self.assertEqual(set(['hostA', 'hostB']),
filter_properties['group_hosts'])
self.assertEqual(['anti-affinity'],
filter_properties['group_policies'])
def test_schedule_host_pool(self):
"""Make sure the scheduler_host_subset_size property works properly."""
self.flags(scheduler_host_subset_size=2)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.ReplayAll()
hosts = sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# one host should be chosen
self.assertEqual(len(hosts), 1)
def test_schedule_large_host_pool(self):
"""Hosts should still be chosen if pool size
is larger than number of filtered hosts.
"""
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.flags(scheduler_host_subset_size=20)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
self.mox.ReplayAll()
hosts = sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
        # one host should be chosen
self.assertEqual(len(hosts), 1)
    def test_schedule_chooses_best_host(self):
        """If scheduler_host_subset_size is 1, the host with the greatest
        weight should be returned.
"""
self.flags(scheduler_host_subset_size=1)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
self.next_weight = 50
def _fake_weigh_objects(_self, functions, hosts, options):
this_weight = self.next_weight
self.next_weight = 0
host_state = hosts[0]
return [weights.WeighedHost(host_state, this_weight)]
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
filter_properties = {}
self.mox.ReplayAll()
hosts = sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# one host should be chosen
self.assertEqual(1, len(hosts))
self.assertEqual(50, hosts[0].weight)
def test_select_destinations(self):
"""select_destinations is basically a wrapper around _schedule().
Similar to the _schedule tests, this just does a happy path test to
ensure there is nothing glaringly wrong.
"""
self.next_weight = 1.0
selected_hosts = []
selected_nodes = []
def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
selected_hosts.append(host_state.host)
selected_nodes.append(host_state.nodename)
return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
fake_get_filtered_hosts)
self.stubs.Set(weights.HostWeightHandler,
'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0,
'vcpus': 1},
'instance_properties': {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'},
'num_instances': 1}
self.mox.ReplayAll()
dests = sched.select_destinations(fake_context, request_spec, {})
(host, node) = (dests[0]['host'], dests[0]['nodename'])
self.assertEqual(host, selected_hosts[0])
self.assertEqual(node, selected_nodes[0])
def test_select_destinations_no_valid_host(self):
def _return_no_host(*args, **kwargs):
return []
self.stubs.Set(self.driver, '_schedule', _return_no_host)
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
{'num_instances': 1}, {})
def test_handles_deleted_instance(self):
"""Test instance deletion while being scheduled."""
def _raise_instance_not_found(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='123')
self.stubs.Set(driver, 'instance_update_db',
_raise_instance_not_found)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
host_state = host_manager.HostState('host2', 'node2')
weighted_host = weights.WeighedHost(host_state, 1.42)
filter_properties = {}
uuid = 'fake-uuid1'
instance_properties = {'project_id': 1, 'os_type': 'Linux'}
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': instance_properties,
'instance_uuids': [uuid]}
sched._provision_resource(fake_context, weighted_host,
request_spec, filter_properties,
None, None, None, None)
def test_pci_request_in_filter_properties(self):
instance_type = {}
request_spec = {'instance_type': instance_type,
'instance_properties': {'project_id': 1,
'os_type': 'Linux'}}
filter_properties = {}
requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
self.mox.StubOutWithMock(pci_request, 'get_pci_requests_from_flavor')
pci_request.get_pci_requests_from_flavor(
instance_type).AndReturn(requests)
self.mox.ReplayAll()
self.driver.populate_filter_properties(
request_spec, filter_properties)
self.assertEqual(filter_properties.get('pci_requests'),
requests)
|
feigames/Odoo | refs/heads/master | addons/web/tests/__init__.py | 54 | # -*- coding: utf-8 -*-
import test_js
# import test_menu
import test_serving_base
|
DepthDeluxe/ansible | refs/heads/devel | lib/ansible/modules/web_infrastructure/jboss.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""
EXAMPLES = """
# Deploy a hello world application
- jboss:
src: /tmp/hello-1.0-SNAPSHOT.war
deployment: hello.war
state: present
# Update the hello world application
- jboss:
src: /tmp/hello-1.1-SNAPSHOT.war
deployment: hello.war
state: present
# Undeploy the hello world application
- jboss:
deployment: hello.war
state: absent
"""
import os
import shutil
import time
from ansible.module_utils.basic import AnsibleModule
def is_deployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
def is_undeployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
def is_failed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
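# The helpers above probe the marker files that the JBoss deployment scanner
# writes next to the archive in deploy_path, e.g. for deployment "hello.war":
#   hello.war.deployed   -> deployment finished successfully
#   hello.war.undeployed -> the archive has been undeployed
#   hello.war.failed     -> the deployment failed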
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path'),
deployment=dict(required=True),
deploy_path=dict(type='path', default='/var/lib/jbossas/standalone/deployments'),
state=dict(choices=['absent', 'present'], default='present'),
),
required_if=[('state', 'present', ('src',))]
)
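    # required_if above makes 'src' mandatory only when state=present; an
    # undeploy (state=absent) needs just the deployment name.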
result = dict(changed=False)
src = module.params['src']
deployment = module.params['deployment']
deploy_path = module.params['deploy_path']
state = module.params['state']
if not os.path.exists(deploy_path):
module.fail_json(msg="deploy_path does not exist.")
deployed = is_deployed(deploy_path, deployment)
if state == 'present' and not deployed:
if not os.path.exists(src):
module.fail_json(msg='Source file %s does not exist.' % src)
if is_failed(deploy_path, deployment):
# Clean up old failed deployment
os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'present' and deployed:
if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
deployed = False
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'absent' and deployed:
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
while deployed:
deployed = not is_undeployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Undeploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
teemulehtinen/a-plus | refs/heads/master | selenium_test/test/teacher_feedback_test.py | 3 | import unittest
from test_initializer import TestInitializer
from page_objects import LoginPage, FileUploadGrader, AssessmentPage, SubmissionPage, HomePage, StudentFeedbackPage
class TeacherFeedbackTest(unittest.TestCase):
def setUp(self):
self.driver = TestInitializer().getDefaultDriver()
TestInitializer().recreateDatabase()
def testStudentShouldGetFeedbackWithNotification(self):
ASSISTANT_FEEDBACK_TEXT = "ASSISTANT_FEEDBACK"
FEEDBACK_TEXT = "FEEDBACK"
EXERCISE_NUMBER = "2"
SUBMISSION_NUMBER = 1
POINTS = "50"
# Submit exercise
LoginPage(self.driver).loginAsStudent()
fileUploadGrader = FileUploadGrader(self.driver)
fileUploadGrader.submit()
fileUploadGrader.logout()
# Check submissions
LoginPage(self.driver).loginAsAssistant()
submissionPage = SubmissionPage(self.driver, exerciseId=EXERCISE_NUMBER)
self.assertEqual(submissionPage.getSubmissionCount(), 1)
# Assess exercise
assessmentPage = AssessmentPage(self.driver, exerciseId=EXERCISE_NUMBER, submissionNumber=SUBMISSION_NUMBER)
assessmentPage.setAssistantFeedback(ASSISTANT_FEEDBACK_TEXT)
assessmentPage.setFeedback(FEEDBACK_TEXT)
assessmentPage.setPoints(POINTS)
assessmentPage.submit()
assessmentPage.logout()
# Check that student receives the correct assessment and a notification of it
LoginPage(self.driver).loginAsStudent()
homePage = HomePage(self.driver)
self.assertTrue(homePage.hasNewNotifications())
studentFeedbackPage = StudentFeedbackPage(self.driver, exerciseId=EXERCISE_NUMBER, submissionNumber=SUBMISSION_NUMBER)
self.assertEqual(studentFeedbackPage.getAssistantFeedbackText(), ASSISTANT_FEEDBACK_TEXT)
self.assertEqual(studentFeedbackPage.getFeedbackText(), FEEDBACK_TEXT)
self.assertEqual(FileUploadGrader(self.driver).getPoints(), POINTS)
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main(verbosity=2)
|
gregdek/ansible | refs/heads/devel | lib/ansible/modules/system/mksysb.py | 52 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
author: Kairo Araujo (@kairoaraujo)
module: mksysb
short_description: Generates AIX mksysb rootvg backups.
description:
- This module manages a basic AIX mksysb (image) of rootvg.
version_added: "2.5"
options:
backup_crypt_files:
description:
- Backup encrypted files.
type: bool
default: "yes"
backup_dmapi_fs:
description:
- Back up DMAPI filesystem files.
type: bool
default: "yes"
create_map_files:
description:
      - Creates new MAP files.
type: bool
default: "no"
exclude_files:
description:
- Excludes files using C(/etc/rootvg.exclude).
type: bool
default: "no"
exclude_wpar_files:
description:
- Excludes WPAR files.
type: bool
default: "no"
extended_attrs:
description:
- Backup extended attributes.
type: bool
default: "yes"
name:
description:
- Backup name
required: true
new_image_data:
description:
      - Creates a new image data file.
type: bool
default: "yes"
software_packing:
description:
      - Excludes files listed in C(/etc/exclude_packing.rootvg) from
        software packing.
type: bool
default: "no"
storage_path:
description:
      - Storage path where the mksysb backup will be stored.
required: true
use_snapshot:
description:
- Creates backup using snapshots.
type: bool
default: "no"
'''
EXAMPLES = '''
- name: Running a backup image mksysb
mksysb:
name: myserver
storage_path: /repository/images
exclude_files: yes
exclude_wpar_files: yes
'''
RETURN = '''
changed:
description: Return changed for mksysb actions as true or false.
returned: always
type: bool
version_added: 2.5
msg:
description: Return message regarding the action.
returned: always
type: str
version_added: 2.5
'''
from ansible.module_utils.basic import AnsibleModule
import os
def main():
module = AnsibleModule(
argument_spec=dict(
backup_crypt_files=dict(type='bool', default=True),
backup_dmapi_fs=dict(type='bool', default=True),
create_map_files=dict(type='bool', default=False),
exclude_files=dict(type='bool', default=False),
exclude_wpar_files=dict(type='bool', default=False),
extended_attrs=dict(type='bool', default=True),
name=dict(required=True),
new_image_data=dict(type='bool', default=True),
software_packing=dict(type='bool', default=False),
storage_path=dict(required=True),
use_snapshot=dict(type='bool', default=False)
),
supports_check_mode=True,
)
# Command options.
map_file_opt = {
True: '-m',
False: ''
}
use_snapshot_opt = {
True: '-T',
False: ''
}
exclude_files_opt = {
True: '-e',
False: ''
}
exclude_wpar_opt = {
True: '-G',
False: ''
}
new_image_data_opt = {
True: '-i',
False: ''
}
soft_packing_opt = {
True: '',
False: '-p'
}
extend_attr_opt = {
True: '',
False: '-a'
}
crypt_files_opt = {
True: '',
False: '-Z'
}
dmapi_fs_opt = {
True: '-a',
False: ''
}
backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
create_map_files = map_file_opt[module.params['create_map_files']]
exclude_files = exclude_files_opt[module.params['exclude_files']]
exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
extended_attrs = extend_attr_opt[module.params['extended_attrs']]
name = module.params['name']
new_image_data = new_image_data_opt[module.params['new_image_data']]
software_packing = soft_packing_opt[module.params['software_packing']]
storage_path = module.params['storage_path']
use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
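    # Illustrative only: with the defaults plus exclude_files and
    # exclude_wpar_files enabled (as in the EXAMPLES block), the command
    # assembled below comes out roughly as
    #   mksysb -X -e -G -p -a -i /repository/images/myserver
    # (disabled options expand to empty strings, hence extra spaces in the
    # real command line).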
# Validate if storage_path is a valid directory.
if os.path.isdir(storage_path):
if not module.check_mode:
# Generates the mksysb image backup.
mksysb_cmd = module.get_bin_path('mksysb', True)
rc, mksysb_output, err = module.run_command(
"%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
mksysb_cmd, create_map_files, use_snapshot, exclude_files,
exclude_wpar_files, software_packing, extended_attrs,
backup_crypt_files, backup_dmapi_fs, new_image_data,
storage_path, name))
if rc == 0:
module.exit_json(changed=True, msg=mksysb_output)
else:
module.fail_json(msg="mksysb failed.", rc=rc, err=err)
module.exit_json(changed=True)
else:
module.fail_json(msg="Storage path %s is not valid." % storage_path)
if __name__ == '__main__':
main()
|
Lilywei123/tempest | refs/heads/master | tempest/tests/test_wrappers.py | 11 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import StringIO
import subprocess
import tempfile
from tempest.tests import base
DEVNULL = open(os.devnull, 'wb')
class TestWrappers(base.TestCase):
def setUp(self):
super(TestWrappers, self).setUp()
# Setup test dirs
self.directory = tempfile.mkdtemp(prefix='tempest-unit')
self.addCleanup(shutil.rmtree, self.directory)
self.test_dir = os.path.join(self.directory, 'tests')
os.mkdir(self.test_dir)
# Setup Test files
self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
self.init_file = os.path.join(self.test_dir, '__init__.py')
self.setup_py = os.path.join(self.directory, 'setup.py')
shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
shutil.copy('setup.py', self.setup_py)
shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
shutil.copy('tempest/tests/files/__init__.py', self.init_file)
# copy over the pretty_tox scripts
shutil.copy('tools/pretty_tox.sh',
os.path.join(self.directory, 'pretty_tox.sh'))
shutil.copy('tools/pretty_tox_serial.sh',
os.path.join(self.directory, 'pretty_tox_serial.sh'))
self.stdout = StringIO.StringIO()
self.stderr = StringIO.StringIO()
# Change directory, run wrapper and check result
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
def assertRunExit(self, cmd, expected):
p = subprocess.Popen(
"bash %s" % cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(
p.returncode, expected,
"Stdout: %s; Stderr: %s" % (out, err))
def test_pretty_tox(self):
# Git init is required for the pbr testr command. pbr requires a git
        # version or an sdist to work, so make the test directory a git repo
# too.
subprocess.call(['git', 'init'], stderr=DEVNULL)
self.assertRunExit('pretty_tox.sh passing', 0)
def test_pretty_tox_fails(self):
# Git init is required for the pbr testr command. pbr requires a git
        # version or an sdist to work, so make the test directory a git repo
# too.
subprocess.call(['git', 'init'], stderr=DEVNULL)
self.assertRunExit('pretty_tox.sh', 1)
def test_pretty_tox_serial(self):
self.assertRunExit('pretty_tox_serial.sh passing', 0)
def test_pretty_tox_serial_fails(self):
self.assertRunExit('pretty_tox_serial.sh', 1)
|
danilito19/django | refs/heads/master | tests/test_utils/views.py | 481 | from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template import Context, Template
from .models import Person
def get_person(request, pk):
person = get_object_or_404(Person, pk=pk)
return HttpResponse(person.name)
def no_template_used(request):
template = Template("This is a string-based template")
return HttpResponse(template.render(Context({})))
def empty_response(request):
return HttpResponse('')
|
shubhdev/edx-platform | refs/heads/master | common/test/acceptance/pages/studio/video/video.py | 58 | """
CMS Video
"""
import time
import os
import requests
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.javascript import wait_for_js, js_defined
from ....tests.helpers import YouTubeStubConfig
from ...lms.video.video import VideoPage
from selenium.webdriver.common.keys import Keys
from ..utils import wait_for_notification
CLASS_SELECTORS = {
'video_container': 'div.video',
'video_init': '.is-initialized',
'video_xmodule': '.xmodule_VideoModule',
'video_spinner': '.video-wrapper .spinner',
'video_controls': 'section.video-controls',
'attach_asset': '.upload-dialog > input[type="file"]',
'upload_dialog': '.wrapper-modal-window-assetupload',
'xblock': '.add-xblock-component',
'slider_range': '.slider-range',
'error': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_bar': '.videolist-extra-videos',
'status': '.transcripts-message-status',
'attach_transcript': '.file-chooser > input[type="file"]',
}
BUTTON_SELECTORS = {
'create_video': 'a[data-category="video"]',
'handout_download': '.video-handout.video-download-button a',
'handout_download_editor': '.wrapper-comp-setting.file-uploader .download-action',
'upload_asset': '.upload-action',
'asset_submit': '.action-upload',
'handout_clear': '.wrapper-comp-setting.file-uploader .setting-clear',
'translations_clear': '.metadata-video-translations .setting-clear',
'translation_add': '.wrapper-translations-settings > a',
'import': '.setting-import',
'download_to_edit': '.setting-download',
'disabled_download_to_edit': '.setting-download.is-disabled',
'upload_new_timed_transcripts': '.setting-upload',
'replace': '.setting-replace',
'choose': '.setting-choose',
'use_existing': '.setting-use-existing',
'collapse_link': '.collapse-action.collapse-setting',
}
DISPLAY_NAME = "Component Display Name"
DEFAULT_SETTINGS = [
# basic
[DISPLAY_NAME, 'Video', False],
['Default Video URL', 'http://youtu.be/3_yD_cEKoCk, , ', False],
# advanced
[DISPLAY_NAME, 'Video', False],
['Default Timed Transcript', '', False],
['Download Transcript Allowed', 'False', False],
['Downloadable Transcript URL', '', False],
['Show Transcript', 'True', False],
['Transcript Languages', '', False],
['Upload Handout', '', False],
['Video Available on Web Only', 'False', False],
['Video Download Allowed', 'False', False],
['Video File URLs', '', False],
['Video ID', '', False],
['Video Start Time', '00:00:00', False],
['Video Stop Time', '00:00:00', False],
['YouTube ID', '3_yD_cEKoCk', False],
['YouTube ID for .75x speed', '', False],
['YouTube ID for 1.25x speed', '', False],
['YouTube ID for 1.5x speed', '', False]
]
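# Each DEFAULT_SETTINGS entry is [setting label, default value, flag]; only the
# first two fields are compared by verify_settings() below.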
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@js_defined('window.Video', 'window.RequireJS.require', 'window.jQuery', 'window.XModule', 'window.XBlock',
'window.MathJax', 'window.MathJax.isReady')
class VideoComponentPage(VideoPage):
"""
CMS Video Component Page
"""
url = None
@wait_for_js
def is_browser_on_page(self):
return self.q(css='div{0}'.format(CLASS_SELECTORS['video_xmodule'])).present or self.q(
css='div{0}'.format(CLASS_SELECTORS['xblock'])).present
def get_element_selector(self, class_name, vertical=False):
return super(VideoComponentPage, self).get_element_selector(class_name, vertical=vertical)
def _wait_for(self, check_func, desc, result=False, timeout=30):
"""
        Calls the method provided as an argument until the Promise is satisfied or a BrokenPromise is raised
Arguments:
check_func (callable): Promise function to be fulfilled.
desc (str): Description of the Promise, used in log messages.
result (bool): Indicates whether we need result from Promise or not
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out.
"""
if result:
return Promise(check_func, desc, timeout=timeout).fulfill()
else:
return EmptyPromise(check_func, desc, timeout=timeout).fulfill()
def wait_for_video_component_render(self):
"""
Wait until video component rendered completely
"""
if not YouTubeStubConfig.get_configuration().get('youtube_api_blocked'):
self._wait_for(lambda: self.q(css=CLASS_SELECTORS['video_init']).present, 'Video Player Initialized')
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['video_spinner']).visible,
'Video Buffering Completed')
self._wait_for(self.is_controls_visible, 'Player Controls are Visible')
@wait_for_js
def is_controls_visible(self):
"""
        Get the current visibility state of all video controls.
Returns:
bool: True means video controls are visible for all videos, False means video controls are not visible
for one or more videos
"""
return self.q(css=CLASS_SELECTORS['video_controls']).visible
def click_button(self, button_name, index=0, require_notification=False):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
index (int): query index
"""
self.q(css=BUTTON_SELECTORS[button_name]).nth(index).click()
if require_notification:
wait_for_notification(self)
self.wait_for_ajax()
@staticmethod
def file_path(filename):
"""
Construct file path to be uploaded to assets.
Arguments:
filename (str): asset filename
"""
return os.sep.join(__file__.split(os.sep)[:-5]) + '/data/uploads/' + filename
def upload_handout(self, handout_filename):
"""
Upload a handout file to assets
Arguments:
handout_filename (str): handout file name
"""
self.upload_asset(handout_filename)
def upload_asset(self, asset_filename, asset_type='handout', index=0):
"""
        Upload an asset file to assets
Arguments:
asset_filename (str): asset file name
asset_type (str): one of `handout`, `transcript`
index (int): query index
"""
asset_file_path = self.file_path(asset_filename)
self.click_button('upload_asset', index)
self.q(css=CLASS_SELECTORS['attach_asset']).results[0].send_keys(asset_file_path)
self.click_button('asset_submit')
        # Only srt format transcript files can be uploaded. If an error
        # occurs due to an incorrect transcript file, we return from here.
if asset_type == 'transcript' and self.q(css='#upload_error').present:
return
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['upload_dialog']).present, 'Upload Completed')
def clear_handout(self):
"""
Clear handout from settings
"""
self.click_button('handout_clear')
def _get_handout(self, url):
"""
Download handout at `url`
"""
kwargs = dict()
session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
return response.status_code < 400, response.headers
def download_handout(self, mime_type, is_editor=False):
"""
Download handout with mime type specified by `mime_type`
Arguments:
mime_type (str): mime type of handout file
Returns:
tuple: Handout download result.
"""
selector = BUTTON_SELECTORS['handout_download_editor'] if is_editor else BUTTON_SELECTORS['handout_download']
handout_url = self.q(css=selector).attrs('href')[0]
result, headers = self._get_handout(handout_url)
return result, headers['content-type'] == mime_type
@property
def is_handout_button_visible(self):
"""
Check if handout download button is visible
"""
return self.q(css=BUTTON_SELECTORS['handout_download']).visible
def create_video(self):
"""
Create a Video Component by clicking on Video button and wait for rendering completion.
"""
# Create video
self.click_button('create_video', require_notification=True)
self.wait_for_video_component_render()
def xblocks(self):
"""
        Returns the total number of video xblocks present on the current unit page.
Returns:
(int): total video xblocks
"""
return len(self.q(css='.xblock-header').filter(
lambda el: 'xblock-header-video' in el.get_attribute('class')).results)
def focus_caption_line(self, line_number):
"""
Focus a caption line as specified by `line_number`
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
self.q(css=caption_line_selector).results[0].send_keys(Keys.ENTER)
def is_caption_line_focused(self, line_number):
"""
Check if a caption line focused
Arguments:
line_number (int): caption line number
"""
caption_line_selector = ".subtitles > li[data-index='{index}']".format(index=line_number - 1)
attributes = self.q(css=caption_line_selector).attrs('class')
return 'focused' in attributes
@property
def is_slider_range_visible(self):
"""
Return True if slider range is visible.
"""
return self.q(css=CLASS_SELECTORS['slider_range']).visible
def verify_settings(self):
"""
Verify that video component has correct default settings.
"""
query = '.wrapper-comp-setting'
settings = self.q(css=query).results
if len(DEFAULT_SETTINGS) != len(settings):
return False
for counter, setting in enumerate(settings):
is_verified = self._verify_setting_entry(setting,
DEFAULT_SETTINGS[counter][0],
DEFAULT_SETTINGS[counter][1])
if not is_verified:
return is_verified
return True
@staticmethod
def _verify_setting_entry(setting, field_name, field_value):
"""
Verify a `setting` entry.
Arguments:
setting (WebElement): Selenium WebElement
field_name (str): Name of field
field_value (str): Value of field
Returns:
bool: Does `setting` have correct value.
"""
if field_name != setting.find_element_by_class_name('setting-label').get_attribute('innerHTML'):
return False
# Get class attribute values
classes = setting.get_attribute('class').split()
list_type_classes = ['metadata-list-enum', 'metadata-dict', 'metadata-video-translations']
is_list_type = any(list_type in classes for list_type in list_type_classes)
if is_list_type:
current_value = ', '.join(
ele.get_attribute('value') for ele in setting.find_elements_by_class_name('list-settings-item'))
elif 'metadata-videolist-enum' in setting.get_attribute('class'):
current_value = ', '.join(item.find_element_by_tag_name('input').get_attribute('value') for item in
setting.find_elements_by_class_name('videolist-settings-item'))
else:
current_value = setting.find_element_by_class_name('setting-input').get_attribute('value')
if field_value != current_value:
return False
        # The clear button should be visible (active class is present) for
        # every setting that doesn't have the 'metadata-videolist-enum' class
if 'metadata-videolist-enum' not in setting.get_attribute('class'):
setting_clear_button = setting.find_elements_by_class_name('setting-clear')[0]
if 'active' not in setting_clear_button.get_attribute('class'):
return False
return True
def set_field_value(self, field_name, field_value, field_type='input'):
"""
Set settings input `field` with `value`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
field_type (str): `input`, `select` etc(more to be added later)
"""
query = '.wrapper-comp-setting > label:nth-child(1)'
field_id = ''
if field_type == 'input':
for index, _ in enumerate(self.q(css=query)):
if field_name in self.q(css=query).nth(index).text[0]:
field_id = self.q(css=query).nth(index).attrs('for')[0]
break
self.q(css='#{}'.format(field_id)).fill(field_value)
elif field_type == 'select':
self.q(css='select[name="{0}"] option[value="{1}"]'.format(field_name, field_value)).first.click()
def verify_field_value(self, field_name, field_value):
"""
Get settings value of `field_name`
Arguments:
field_name (str): Name of field
field_value (str): Name of value
Returns:
bool: If `field_name` has `field_value`
"""
_, setting = self._get_setting_entry(field_name)
return self._verify_setting_entry(setting, field_name, field_value)
def _get_setting_entry(self, field_name):
"""
Get setting entry of `field_name`
Arguments:
field_name (str): Name of field
Returns:
setting (WebElement): Selenium WebElement
"""
for index, setting in enumerate(self.q(css='.wrapper-comp-setting').results):
if setting.find_element_by_class_name('setting-label').get_attribute('innerHTML') == field_name:
return index, setting
def translations_count(self):
"""
Get count of translations.
"""
return len(self.q(css='.wrapper-translations-settings .list-settings-item').results)
def select_translation_language(self, language_code, index=0):
"""
Select translation language as specified by `language_code`
Arguments:
language_code (str):
index (int): query index
"""
translations_items = '.wrapper-translations-settings .list-settings-item'
language_selector = translations_items + ' select option[value="{}"]'.format(language_code)
self.q(css=language_selector).nth(index).click()
def upload_translation(self, transcript_name, language_code):
"""
Upload a translation file.
Arguments:
transcript_name (str):
language_code (str):
"""
self.click_button('translation_add')
translations_count = self.translations_count()
self.select_translation_language(language_code, translations_count - 1)
self.upload_asset(transcript_name, asset_type='transcript', index=translations_count - 1)
def replace_translation(self, old_lang_code, new_lang_code, transcript_name):
"""
Replace a translation.
Arguments:
old_lang_code (str):
new_lang_code (str):
transcript_name (str):
"""
language_codes = self.translations()
index = language_codes.index(old_lang_code)
self.select_translation_language(new_lang_code, index)
self.upload_asset(transcript_name, asset_type='transcript', index=index)
def translations(self):
"""
Extract translations
Returns:
list: list of translation language codes
"""
translations_selector = '.metadata-video-translations .remove-setting'
return self.q(css=translations_selector).attrs('data-lang')
def download_translation(self, language_code, text_to_search):
"""
Download a translation having `language_code` and containing `text_to_search`
Arguments:
language_code (str): language code
text_to_search (str): text to search in translation
Returns:
bool: whether download was successful
"""
mime_type = 'application/x-subrip'
lang_code = '/{}?'.format(language_code)
link = [link for link in self.q(css='.download-action').attrs('href') if lang_code in link]
result, headers, content = self._get_transcript(link[0])
return result is True and mime_type in headers['content-type'] and text_to_search in content.decode('utf-8')
def remove_translation(self, language_code):
"""
Remove a translation having `language_code`
Arguments:
language_code (str): language code
"""
self.q(css='.remove-action').filter(lambda el: language_code == el.get_attribute('data-lang')).click()
@property
def upload_status_message(self):
"""
Get asset upload status message
"""
return self.q(css='#upload_error').text[0]
def captions_lines(self):
"""
Extract partial caption lines.
        Since all caption lines are identical, reading only a few of them is sufficient.
"""
self.wait_for_captions()
selector = '.subtitles > li:nth-child({})'
return ' '.join([self.q(css=selector.format(i)).text[0] for i in range(1, 6)])
def set_url_field(self, url, field_number):
"""
Set video url field in basic settings tab.
Arguments:
url (str): video url
field_number (int): video url field number
"""
if self.q(css=CLASS_SELECTORS['collapse_bar']).visible is False:
self.click_button('collapse_link')
self.q(css=CLASS_SELECTORS['url_inputs']).nth(field_number - 1).fill(url)
time.sleep(DELAY)
self.wait_for_ajax()
def message(self, message_type):
"""
Get video url field status/error message.
Arguments:
message_type(str): type(status, error) of message
Returns:
str: status/error message
"""
if message_type == 'status':
self.wait_for_element_visibility(CLASS_SELECTORS[message_type],
'{} message is Visible'.format(message_type.title()))
return self.q(css=CLASS_SELECTORS[message_type]).text[0]
def url_field_status(self, *field_numbers):
"""
        Get video url field status (enable/disable).
Arguments:
url (str): video url
field_numbers (tuple or None): field numbers to check status for, None means get status for all.
tuple items will be integers and must start from 1
Returns:
dict: field numbers as keys and field status(bool) as values, False means a field is disabled
"""
if field_numbers:
index_list = [number - 1 for number in field_numbers]
else:
index_list = range(3) # maximum three fields
statuses = {}
for index in index_list:
status = 'is-disabled' not in self.q(css=CLASS_SELECTORS['url_inputs']).nth(index).attrs('class')[0]
statuses[index + 1] = status
return statuses
def clear_field(self, index):
"""
Clear a video url field at index specified by `index`.
"""
self.q(css=CLASS_SELECTORS['url_inputs']).nth(index - 1).fill('')
# Trigger an 'input' event after filling the field with an empty value.
self.browser.execute_script(
"$('{}:eq({})').trigger('{}')".format(CLASS_SELECTORS['url_inputs'], index, 'input'))
time.sleep(DELAY)
self.wait_for_ajax()
def clear_fields(self):
"""
Clear video url fields.
"""
script = """
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.val('')
.trigger('input');
""".format(selector=CLASS_SELECTORS['url_inputs'])
self.browser.execute_script(script)
time.sleep(DELAY)
self.wait_for_ajax()
def revert_field(self, field_name):
"""
Revert a field.
"""
_, setting = self._get_setting_entry(field_name)
setting.find_element_by_class_name('setting-clear').click()
def is_transcript_button_visible(self, button_name, index=0, button_text=None):
"""
Check if a transcript related button is visible.
Arguments:
button_name (str): name of button
index (int): query index
button_text (str or None): text to match with text on a button, if None then don't match texts
Returns:
bool: is button visible
"""
is_visible = self.q(css=BUTTON_SELECTORS[button_name]).nth(index).visible
is_text_matched = True
if button_text and button_text != self.q(css=BUTTON_SELECTORS[button_name]).nth(index).text[0]:
is_text_matched = False
return is_visible and is_text_matched
def upload_transcript(self, transcript_filename):
"""
Upload a Transcript
Arguments:
transcript_filename (str): name of transcript file
"""
# Show the Browse Button
self.browser.execute_script("$('form.file-chooser').show()")
asset_file_path = self.file_path(transcript_filename)
self.q(css=CLASS_SELECTORS['attach_transcript']).results[0].send_keys(asset_file_path)
# confirm upload completion
self._wait_for(lambda: not self.q(css=CLASS_SELECTORS['attach_transcript']).visible, 'Upload Completed')
|
Shao-Feng/crosswalk-test-suite | refs/heads/master | webapi/tct-csp-w3c-tests/csp-py/csp_script-src_unsafe-inline_unsafe-eval.py | 30 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
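    # url1/url2 are assembled from the shared test config but are not referenced
    # by this particular policy page; presumably they are kept for parity with
    # the sibling csp_* generators that do target cross-origin hosts.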
_CSP = "script-src 'self' 'unsafe-inline' 'unsafe-eval'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
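    # All three header variants carry the same policy, so the response includes,
    # among others:
    #   Content-Security-Policy: script-src 'self' 'unsafe-inline' 'unsafe-eval'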
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_script-src_unsafe-inline_unsafe-eval</title>
<link rel="author" title="Intel" href="http://www.intel.com/"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#script-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="script-src 'self' 'unsafe-inline' 'unsafe-eval'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<script src="support/csp.js"></script>
<script>
test(function() {
assert_equals(X, 10, "X is 10");
assert_equals(Y, 27, "Y is X+17");
}, document.title + "_allowed_int");
test(function() {
assert_equals(eval(X + Y), 37, "eval(X + Y) should be 37");
}, document.title + "_allowed_inline");
</script>
</body>
</html> """
|
ContinuumIO/odo | refs/heads/master | odo/backends/tests/test_ssh.py | 4 | from __future__ import absolute_import, division, print_function
import pytest
paramiko = pytest.importorskip('paramiko')
import pandas as pd
import numpy as np
import re
import os
import sys
from odo.utils import tmpfile, filetext
from odo.directory import _Directory, Directory
from odo.backends.ssh import SSH, resource, ssh_pattern, sftp, drop, connect
from odo.backends.csv import CSV
from odo import into, discover, CSV, JSONLines, JSON, convert
from odo.temp import _Temp, Temp
from odo.compatibility import ON_TRAVIS_CI
import socket
skipif = pytest.mark.skipif
# NOTE: this is a workaround for paramiko on Py2; connect() hangs without
# raising an exception. Shows up on paramiko 1.16.0 and 2.0.2 with Py 2.7.
# KWS: 2016-08-10
# JJ: Still happens as of 2016-10-20
try_to_connect = sys.version_info[0] >= 3
pytestmark = skipif(not try_to_connect, reason='could not connect')
if try_to_connect:
try:
ssh = connect(hostname='localhost')
ssh.close()
except socket.error:
pytestmark = pytest.mark.skip('Could not connect')
except paramiko.PasswordRequiredException as e:
pytestmark = pytest.mark.skip(str(e))
except paramiko.SSHException as e:
pytestmark = pytest.mark.skip(str(e))
except TypeError:
# NOTE: This is a workaround for paramiko version 1.16.0 on Python 3.4,
# that raises a TypeError due to improper indexing internally into
# dict_keys when a ConnectionRefused error is raised.
# KWS 2016-04-21.
pytestmark = pytest.mark.skip('Could not connect')
def test_resource():
r = resource('ssh://joe@localhost:/path/to/myfile.csv')
assert isinstance(r, SSH(CSV))
assert r.path == '/path/to/myfile.csv'
assert r.auth['hostname'] == 'localhost'
assert r.auth['username'] == 'joe'
def test_connect():
a = connect(hostname='localhost')
b = connect(hostname='localhost')
assert a is b
a.close()
c = connect(hostname='localhost')
assert a is c
assert c.get_transport() and c.get_transport().is_active()
def test_resource_directory():
r = resource('ssh://joe@localhost:/path/to/')
assert issubclass(r.subtype, _Directory)
r = resource('ssh://joe@localhost:/path/to/*.csv')
assert r.subtype == Directory(CSV)
assert r.path == '/path/to/'
def test_discover():
with filetext('name,balance\nAlice,100\nBob,200') as fn:
local = CSV(fn)
remote = SSH(CSV)(fn, hostname='localhost')
assert discover(local) == discover(remote)
def test_discover_from_resource():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
local = CSV(fn)
remote = resource('ssh://localhost:' + fn)
assert discover(local) == discover(remote)
def test_ssh_pattern():
uris = ['localhost:myfile.csv',
'127.0.0.1:/myfile.csv',
'user@127.0.0.1:/myfile.csv',
'user@127.0.0.1:/*.csv',
'user@127.0.0.1:/my-dir/my-file3.csv']
for uri in uris:
assert re.match(ssh_pattern, uri)
def test_copy_remote_csv():
with tmpfile('csv') as target:
with filetext('name,balance\nAlice,100\nBob,200',
extension='csv') as fn:
csv = resource(fn)
uri = 'ssh://localhost:%s.csv' % target
scsv = into(uri, csv)
assert isinstance(scsv, SSH(CSV))
assert discover(scsv) == discover(csv)
# Round trip
csv2 = into(target, scsv)
assert into(list, csv) == into(list, csv2)
def test_drop():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
with tmpfile('csv') as target:
scsv = SSH(CSV)(target, hostname='localhost')
assert not os.path.exists(target)
conn = sftp(**scsv.auth)
conn.put(fn, target)
assert os.path.exists(target)
drop(scsv)
drop(scsv)
assert not os.path.exists(target)
def test_drop_of_csv_json_lines_use_ssh_version():
from odo.backends.ssh import drop_ssh
for typ in [CSV, JSON, JSONLines]:
assert drop.dispatch(SSH(typ)) == drop_ssh
def test_convert_local_file_to_temp_ssh_file():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
scsv = convert(Temp(SSH(CSV)), csv, hostname='localhost')
assert into(list, csv) == into(list, scsv)
@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_temp_ssh_files():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
assert discover(csv) == discover(scsv)
assert isinstance(scsv, _Temp)
@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_convert_through_temporary_local_storage():
with filetext('name,quantity\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
df = into(pd.DataFrame, csv)
scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
assert into(list, csv) == into(list, scsv)
scsv2 = into(Temp(SSH(CSV)), df, hostname='localhost')
assert into(list, scsv2) == into(list, df)
sjson = into(Temp(SSH(JSONLines)), df, hostname='localhost')
assert (into(np.ndarray, sjson) == into(np.ndarray, df)).all()
@skipif(ON_TRAVIS_CI and sys.version_info[0] == 3,
reason='Strange hanging on travis for python33 and python34')
def test_ssh_csv_to_s3_csv():
# for some reason this can only be run in the same file as other ssh tests
# and must be a Temp(SSH(CSV)) otherwise tests above this one fail
s3_bucket = pytest.importorskip('odo.backends.tests.test_aws').s3_bucket
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
remote = into(Temp(SSH(CSV)), CSV(fn), hostname='localhost')
with s3_bucket('.csv') as b:
result = into(b, remote)
assert discover(result) == discover(resource(b))
@skipif(ON_TRAVIS_CI and sys.version_info[0] == 3,
reason='Strange hanging on travis for python33 and python34')
def test_s3_to_ssh():
pytest.importorskip('boto')
tips_uri = 's3://nyqpug/tips.csv'
with tmpfile('.csv') as fn:
result = into(Temp(SSH(CSV))(fn, hostname='localhost'), tips_uri)
assert into(list, result) == into(list, tips_uri)
assert discover(result) == discover(resource(tips_uri))
|
anfedorov/atom-script | refs/heads/master | examples/version_info.py | 18 | #!/usr/bin/env python2.7
import sys
print(sys.version_info)
|
damonkohler/sl4a | refs/heads/master | python/gdata/src/gdata/tlslite/utils/OpenSSL_TripleDES.py | 359 | """OpenSSL/M2Crypto 3DES implementation."""
from cryptomath import *
from TripleDES import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return ciphertext
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
            # I think M2Crypto has a bug: it fails to decrypt and return the last block passed in.
            # To work around this, we append sixteen zero bytes to the string below:
            plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
            # If this bug is ever fixed, plaintext will end up with a garbage
            # block on the end.  That's okay - the code below will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return plaintext |
bigswitch/neutron | refs/heads/master | neutron/tests/unit/agent/metadata/test_agent.py | 1 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as n_const
import testtools
import webob
from neutron.agent.linux import utils as agent_utils
from neutron.agent.metadata import agent
from neutron.agent.metadata import config
from neutron.agent import metadata_agent
from neutron.common import utils
from neutron.tests import base
class FakeConf(object):
auth_ca_cert = None
nova_metadata_ip = '9.9.9.9'
nova_metadata_port = 8775
metadata_proxy_shared_secret = 'secret'
nova_metadata_protocol = 'http'
nova_metadata_insecure = True
nova_client_cert = 'nova_cert'
nova_client_priv_key = 'nova_priv_key'
cache_url = ''
class FakeConfCache(FakeConf):
cache_url = 'memory://?default_ttl=5'
class TestMetadataProxyHandlerBase(base.BaseTestCase):
fake_conf = FakeConf
def setUp(self):
super(TestMetadataProxyHandlerBase, self).setUp()
self.log_p = mock.patch.object(agent, 'LOG')
self.log = self.log_p.start()
self.handler = agent.MetadataProxyHandler(self.fake_conf)
self.handler.plugin_rpc = mock.Mock()
self.handler.context = mock.Mock()
class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):
def test_get_port_filters(self):
router_id = 'test_router_id'
ip = '1.2.3.4'
networks = ('net_id1', 'net_id2')
expected = {'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS,
'network_id': networks,
'fixed_ips': {'ip_address': [ip]}}
actual = self.handler._get_port_filters(router_id, ip, networks)
self.assertEqual(expected, actual)
def test_get_router_networks(self):
router_id = 'router-id'
expected = ('network_id1', 'network_id2')
ports = [{'network_id': 'network_id1', 'something': 42},
{'network_id': 'network_id2', 'something_else': 32}]
self.handler.plugin_rpc.get_ports.return_value = ports
networks = self.handler._get_router_networks(router_id)
self.assertEqual(expected, networks)
def test_get_ports_for_remote_address(self):
ip = '1.1.1.1'
networks = ('network_id1', 'network_id2')
expected = [{'port_id': 'port_id1'},
{'port_id': 'port_id2'}]
self.handler.plugin_rpc.get_ports.return_value = expected
ports = self.handler._get_ports_for_remote_address(ip, networks)
self.assertEqual(expected, ports)
class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase):
fake_conf = FakeConfCache
def test_call(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = ('instance_id', 'tenant_id')
with mock.patch.object(self.handler, '_proxy_request') as proxy:
proxy.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
def test_call_no_instance_match(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = None, None
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_call_internal_server_error(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
def test_get_router_networks(self):
router_id = 'router-id'
expected = ('network_id1', 'network_id2')
ports = [{'network_id': 'network_id1', 'something': 42},
{'network_id': 'network_id2', 'something_else': 32}]
mock_get_ports = self.handler.plugin_rpc.get_ports
mock_get_ports.return_value = ports
networks = self.handler._get_router_networks(router_id)
mock_get_ports.assert_called_once_with(
mock.ANY,
{'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
self.assertEqual(expected, networks)
def _test_get_router_networks_twice_helper(self):
router_id = 'router-id'
ports = [{'network_id': 'network_id1', 'something': 42}]
expected_networks = ('network_id1',)
with mock.patch(
'oslo_utils.timeutils.utcnow_ts', return_value=0):
mock_get_ports = self.handler.plugin_rpc.get_ports
mock_get_ports.return_value = ports
networks = self.handler._get_router_networks(router_id)
mock_get_ports.assert_called_once_with(
mock.ANY,
{'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
self.assertEqual(expected_networks, networks)
networks = self.handler._get_router_networks(router_id)
def test_get_router_networks_twice(self):
self._test_get_router_networks_twice_helper()
self.assertEqual(
1, self.handler.plugin_rpc.get_ports.call_count)
def _get_ports_for_remote_address_cache_hit_helper(self):
remote_address = 'remote_address'
networks = ('net1', 'net2')
mock_get_ports = self.handler.plugin_rpc.get_ports
mock_get_ports.return_value = [{'network_id': 'net1', 'something': 42}]
self.handler._get_ports_for_remote_address(remote_address, networks)
mock_get_ports.assert_called_once_with(
mock.ANY,
{'network_id': networks,
'fixed_ips': {'ip_address': [remote_address]}}
)
self.assertEqual(1, mock_get_ports.call_count)
self.handler._get_ports_for_remote_address(remote_address,
networks)
def test_get_ports_for_remote_address_cache_hit(self):
self._get_ports_for_remote_address_cache_hit_helper()
self.assertEqual(
1, self.handler.plugin_rpc.get_ports.call_count)
def test_get_ports_network_id(self):
network_id = 'network-id'
router_id = 'router-id'
remote_address = 'remote-address'
expected = ['port1']
networks = (network_id,)
with mock.patch.object(self.handler,
'_get_ports_for_remote_address'
) as mock_get_ip_addr,\
mock.patch.object(self.handler,
'_get_router_networks'
) as mock_get_router_networks:
mock_get_ip_addr.return_value = expected
ports = self.handler._get_ports(remote_address, network_id,
router_id)
mock_get_ip_addr.assert_called_once_with(remote_address,
networks)
self.assertFalse(mock_get_router_networks.called)
self.assertEqual(expected, ports)
def test_get_ports_router_id(self):
router_id = 'router-id'
remote_address = 'remote-address'
expected = ['port1']
networks = ('network1', 'network2')
with mock.patch.object(self.handler,
'_get_ports_for_remote_address',
return_value=expected
) as mock_get_ip_addr,\
mock.patch.object(self.handler,
'_get_router_networks',
return_value=networks
) as mock_get_router_networks:
ports = self.handler._get_ports(remote_address,
router_id=router_id)
mock_get_router_networks.assert_called_once_with(router_id)
mock_get_ip_addr.assert_called_once_with(remote_address, networks)
self.assertEqual(expected, ports)
def test_get_ports_no_id(self):
self.assertRaises(TypeError, self.handler._get_ports, 'remote_address')
def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
networks=None, router_id=None):
remote_address = '192.168.1.1'
headers['X-Forwarded-For'] = remote_address
req = mock.Mock(headers=headers)
def mock_get_ports(*args, **kwargs):
return list_ports_retval.pop(0)
self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports
instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
expected = []
if router_id:
expected.append(
mock.call(
mock.ANY,
{'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS}
)
)
expected.append(
mock.call(
mock.ANY,
{'network_id': networks,
'fixed_ips': {'ip_address': ['192.168.1.1']}}
)
)
self.handler.plugin_rpc.get_ports.assert_has_calls(expected)
return (instance_id, tenant_id)
def test_get_instance_id_router_id(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ('net1', 'net2')
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[{'device_id': 'device_id', 'tenant_id': 'tenant_id',
'network_id': 'net1'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
('device_id', 'tenant_id')
)
def test_get_instance_id_router_id_no_match(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ('net1', 'net2')
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
(None, None)
)
def test_get_instance_id_network_id(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [
[{'device_id': 'device_id',
'tenant_id': 'tenant_id',
'network_id': 'the_id'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=('the_id',)),
('device_id', 'tenant_id')
)
def test_get_instance_id_network_id_no_match(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [[]]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=('the_id',)),
(None, None)
)
def _proxy_request_test_helper(self, response_code=200, method='GET'):
hdrs = {'X-Forwarded-For': '8.8.8.8'}
body = 'body'
req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
method=method, body=body)
resp = mock.MagicMock(status=response_code)
req.response = resp
with mock.patch.object(self.handler, '_sign_instance_id') as sign:
sign.return_value = 'signed'
with mock.patch('httplib2.Http') as mock_http:
resp.__getitem__.return_value = "text/plain"
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('the_id', 'tenant_id',
req)
mock_http.assert_called_once_with(
ca_certs=None, disable_ssl_certificate_validation=True)
mock_http.assert_has_calls([
mock.call().add_certificate(
FakeConf.nova_client_priv_key,
FakeConf.nova_client_cert,
"%s:%s" % (FakeConf.nova_metadata_ip,
FakeConf.nova_metadata_port)
),
mock.call().request(
'http://9.9.9.9:8775/the_path',
method=method,
headers={
'X-Forwarded-For': '8.8.8.8',
'X-Instance-ID-Signature': 'signed',
'X-Instance-ID': 'the_id',
'X-Tenant-ID': 'tenant_id'
},
body=body
)]
)
return retval
def test_proxy_request_post(self):
response = self._proxy_request_test_helper(method='POST')
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_200(self):
response = self._proxy_request_test_helper(200)
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_400(self):
self.assertIsInstance(self._proxy_request_test_helper(400),
webob.exc.HTTPBadRequest)
def test_proxy_request_403(self):
self.assertIsInstance(self._proxy_request_test_helper(403),
webob.exc.HTTPForbidden)
def test_proxy_request_404(self):
self.assertIsInstance(self._proxy_request_test_helper(404),
webob.exc.HTTPNotFound)
def test_proxy_request_409(self):
self.assertIsInstance(self._proxy_request_test_helper(409),
webob.exc.HTTPConflict)
def test_proxy_request_500(self):
self.assertIsInstance(self._proxy_request_test_helper(500),
webob.exc.HTTPInternalServerError)
def test_proxy_request_other_code(self):
with testtools.ExpectedException(Exception):
self._proxy_request_test_helper(302)
def test_sign_instance_id(self):
self.assertEqual(
self.handler._sign_instance_id('foo'),
'773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
)
class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):
fake_conf = FakeConf
def test_get_router_networks_twice(self):
self._test_get_router_networks_twice_helper()
self.assertEqual(
2, self.handler.plugin_rpc.get_ports.call_count)
def test_get_ports_for_remote_address_cache_hit(self):
self._get_ports_for_remote_address_cache_hit_helper()
self.assertEqual(
2, self.handler.plugin_rpc.get_ports.call_count)
class TestUnixDomainMetadataProxy(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainMetadataProxy, self).setUp()
self.cfg_p = mock.patch.object(agent, 'cfg')
self.cfg = self.cfg_p.start()
looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_mock = looping_call_p.start()
self.cfg.CONF.metadata_proxy_socket = '/the/path'
self.cfg.CONF.metadata_workers = 0
self.cfg.CONF.metadata_backlog = 128
self.cfg.CONF.metadata_proxy_socket_mode = config.USER_MODE
@mock.patch.object(utils, 'ensure_dir')
def test_init_doesnot_exists(self, ensure_dir):
agent.UnixDomainMetadataProxy(mock.Mock())
ensure_dir.assert_called_once_with('/the')
def test_init_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
isdir.return_value = True
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_no_file(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = False
unlink.side_effect = OSError
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_fails_file_still_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = True
unlink.side_effect = OSError
with testtools.ExpectedException(OSError):
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
@mock.patch.object(agent, 'MetadataProxyHandler')
@mock.patch.object(agent_utils, 'UnixDomainWSGIServer')
@mock.patch.object(utils, 'ensure_dir')
def test_run(self, ensure_dir, server, handler):
p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
p.run()
ensure_dir.assert_called_once_with('/the')
server.assert_has_calls([
mock.call('neutron-metadata-agent'),
mock.call().start(handler.return_value,
'/the/path', workers=0,
backlog=128, mode=0o644),
mock.call().wait()]
)
def test_main(self):
with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:
with mock.patch.object(metadata_agent, 'config') as config:
with mock.patch.object(metadata_agent, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg'):
metadata_agent.main()
self.assertTrue(config.setup_logging.called)
proxy.assert_has_calls([
mock.call(cfg.CONF),
mock.call().run()]
)
def test_init_state_reporting(self):
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.looping_mock.assert_called_once_with(proxy._report_state)
self.looping_mock.return_value.start.assert_called_once_with(
interval=mock.ANY)
def test_report_state(self):
with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.assertTrue(proxy.agent_state['start_flag'])
proxy._report_state()
self.assertNotIn('start_flag', proxy.agent_state)
state_api_inst = state_api.return_value
state_api_inst.report_state.assert_called_once_with(
proxy.context, proxy.agent_state, use_call=True)
|
pixelrebel/st2 | refs/heads/master | st2common/tests/unit/test_util_mistral_dsl_transform.py | 7 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import six
import yaml
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
from st2common.exceptions.workflow import WorkflowDefinitionException
from st2common.models.api.action import ActionAPI, RunnerTypeAPI
from st2common.persistence.action import Action
from st2common.persistence.runner import RunnerType
from st2common.util.workflow import mistral as utils
WB_PRE_XFORM_FILE = 'wb_pre_xform.yaml'
WB_POST_XFORM_FILE = 'wb_post_xform.yaml'
WF_PRE_XFORM_FILE = 'wf_pre_xform.yaml'
WF_POST_XFORM_FILE = 'wf_post_xform.yaml'
WF_NO_REQ_PARAM_FILE = 'wf_missing_required_param.yaml'
WF_UNEXP_PARAM_FILE = 'wf_has_unexpected_param.yaml'
TEST_FIXTURES = {
'workflows': [
WB_PRE_XFORM_FILE,
WB_POST_XFORM_FILE,
WF_PRE_XFORM_FILE,
WF_POST_XFORM_FILE,
WF_NO_REQ_PARAM_FILE,
WF_UNEXP_PARAM_FILE
],
'actions': [
'local.yaml',
'a1.yaml',
'a2.yaml',
'action1.yaml'
],
'runners': [
'run-local.yaml',
'testrunner1.yaml',
'testrunner2.yaml'
]
}
PACK = 'generic'
LOADER = FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
WB_PRE_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB_PRE_XFORM_FILE)
WB_PRE_XFORM_DEF = FIXTURES['workflows'][WB_PRE_XFORM_FILE]
WB_POST_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WB_POST_XFORM_FILE)
WB_POST_XFORM_DEF = FIXTURES['workflows'][WB_POST_XFORM_FILE]
WF_PRE_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_PRE_XFORM_FILE)
WF_PRE_XFORM_DEF = FIXTURES['workflows'][WF_PRE_XFORM_FILE]
WF_POST_XFORM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_POST_XFORM_FILE)
WF_POST_XFORM_DEF = FIXTURES['workflows'][WF_POST_XFORM_FILE]
WF_NO_REQ_PARAM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_NO_REQ_PARAM_FILE)
WF_NO_REQ_PARAM_DEF = FIXTURES['workflows'][WF_NO_REQ_PARAM_FILE]
WF_UNEXP_PARAM_PATH = LOADER.get_fixture_file_path_abs(PACK, 'workflows', WF_UNEXP_PARAM_FILE)
WF_UNEXP_PARAM_DEF = FIXTURES['workflows'][WF_UNEXP_PARAM_FILE]
class DSLTransformTestCase(DbTestCase):
@classmethod
def setUpClass(cls):
super(DSLTransformTestCase, cls).setUpClass()
for _, fixture in six.iteritems(FIXTURES['runners']):
instance = RunnerTypeAPI(**fixture)
RunnerType.add_or_update(RunnerTypeAPI.to_model(instance))
for _, fixture in six.iteritems(FIXTURES['actions']):
instance = ActionAPI(**fixture)
Action.add_or_update(ActionAPI.to_model(instance))
def _read_file_content(self, path):
with open(path, 'r') as f:
return f.read()
def _read_yaml_file_as_json(self, path):
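        # Note (added comment): despite its name, this helper returns the YAML
        # parsed into native Python objects (a dict), not a JSON string.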
def_yaml = self._read_file_content(path)
return yaml.safe_load(def_yaml)
def test_transform_workbook_dsl_yaml(self):
def_yaml = self._read_file_content(WB_PRE_XFORM_PATH)
new_def = utils.transform_definition(def_yaml)
actual = yaml.safe_load(new_def)
expected = copy.deepcopy(WB_POST_XFORM_DEF)
self.assertDictEqual(actual, expected)
def test_transform_workbook_dsl_dict(self):
def_dict = self._read_yaml_file_as_json(WB_PRE_XFORM_PATH)
actual = utils.transform_definition(def_dict)
expected = copy.deepcopy(WB_POST_XFORM_DEF)
self.assertDictEqual(actual, expected)
def test_transform_workflow_dsl_yaml(self):
def_yaml = self._read_file_content(WF_PRE_XFORM_PATH)
new_def = utils.transform_definition(def_yaml)
actual = yaml.safe_load(new_def)
expected = copy.deepcopy(WF_POST_XFORM_DEF)
self.assertDictEqual(actual, expected)
def test_transform_workflow_dsl_dict(self):
def_dict = self._read_yaml_file_as_json(WF_PRE_XFORM_PATH)
actual = utils.transform_definition(def_dict)
expected = copy.deepcopy(WF_POST_XFORM_DEF)
self.assertDictEqual(actual, expected)
def test_required_action_params_failure(self):
def_dict = self._read_yaml_file_as_json(WF_NO_REQ_PARAM_PATH)
with self.assertRaises(WorkflowDefinitionException) as cm:
utils.transform_definition(def_dict)
self.assertIn('Missing required parameters', cm.exception.message)
def test_unexpected_action_params_failure(self):
def_dict = self._read_yaml_file_as_json(WF_UNEXP_PARAM_PATH)
with self.assertRaises(WorkflowDefinitionException) as cm:
utils.transform_definition(def_dict)
self.assertIn('Unexpected parameters', cm.exception.message)
def test_deprecated_callback_action(self):
def_dict = self._read_yaml_file_as_json(WB_PRE_XFORM_PATH)
def_dict['workflows']['main']['tasks']['callback'] = {'action': 'st2.callback'}
def_yaml = yaml.safe_dump(def_dict)
self.assertRaises(WorkflowDefinitionException, utils.transform_definition, def_yaml)
|
Davideddu/kivy-forkedtouch | refs/heads/master | kivy/core/video/video_null.py | 81 |
'''
VideoNull: empty implementation of VideoBase for the no provider case
'''
from kivy.core.video import VideoBase
class VideoNull(VideoBase):
'''VideoBase implementation when there is no provider.
'''
pass
|
LUTAN/tensorflow | refs/heads/master | tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py | 81 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for creating Stochastic Tensors.
See the @{$python/contrib.bayesflow.stochastic_tensor} guide.
@@BaseStochasticTensor
@@StochasticTensor
@@MeanValue
@@SampleValue
@@value_type
@@get_current_value_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.stochastic_tensor_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"BaseStochasticTensor",
"StochasticTensor",
"ObservedStochasticTensor",
"MeanValue",
"SampleValue",
"value_type",
"get_current_value_type",
]
remove_undocumented(__name__, _allowed_symbols)
|
jsilhan/dnf | refs/heads/master | dnf/cli/commands/upgrade.py | 1 | # upgrade.py
# Upgrade CLI command.
#
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.i18n import _
from dnf.cli.option_parser import OptionParser
import dnf.exceptions
import logging
logger = logging.getLogger('dnf')
class UpgradeCommand(commands.Command):
"""A class containing methods needed by the cli to execute the
update command.
"""
aliases = ('upgrade', 'update', 'upgrade-to', 'update-to')
summary = _('upgrade a package or packages on your system')
@staticmethod
def set_argparser(parser):
parser.add_argument('packages', nargs='*', help=_('Package to upgrade'),
action=OptionParser.ParseSpecGroupFileCallback,
metavar=_('PACKAGE'))
def configure(self):
"""Verify that conditions are met so that this command can run.
These include that there are enabled repositories with gpg
keys, and that this command is being run by the root user.
"""
demands = self.cli.demands
demands.sack_activation = True
demands.available_repos = True
demands.resolving = True
demands.root_user = True
commands._checkGPGKey(self.base, self.cli)
commands._checkEnabledRepo(self.base, self.opts.filenames)
self.upgrade_minimal = None
self.all_security = None
def run(self):
self.cli._populate_update_security_filter(self.opts,
minimal=self.upgrade_minimal,
all=self.all_security)
done = False
if self.opts.filenames or self.opts.pkg_specs or self.opts.grp_specs:
# Update files.
if self.opts.filenames:
for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False):
try:
self.base.package_upgrade(pkg)
except dnf.exceptions.MarkingError as e:
logger.info(_('No match for argument: %s'),
self.base.output.term.bold(pkg.location))
else:
done = True
# Update packages.
for pkg_spec in self.opts.pkg_specs:
try:
self.base.upgrade(pkg_spec)
except dnf.exceptions.MarkingError as e:
logger.info(_('No match for argument: %s'),
self.base.output.term.bold(pkg_spec))
else:
done = True
# Update groups.
if self.opts.grp_specs:
self.base.read_comps(arch_filter=True)
self.base.env_group_upgrade(self.opts.grp_specs)
done = True
else:
# Update all packages.
self.base.upgrade_all()
done = True
if not done:
raise dnf.exceptions.Error(_('No packages marked for upgrade.'))
|
willprice/weboob | refs/heads/master | modules/ilmatieteenlaitos/pages.py | 5 | # -*- coding: utf-8 -*-
# Copyright(C) 2015 Matthieu Weber
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from itertools import imap, ifilter
from weboob.browser.pages import JsonPage, HTMLPage
from weboob.browser.elements import ItemElement, ListElement, DictElement, method
from weboob.capabilities.weather import Forecast, Current, City, Temperature
from weboob.browser.filters.json import Dict
from weboob.browser.filters.standard import Filter, CleanText, CleanDecimal, Regexp, Format, Date
class Id(Filter):
def filter(self, txt):
return txt.split(", ")[0]
class SearchCitiesPage(JsonPage):
@method
class iter_cities(DictElement):
item_xpath = '.'
ignore_duplicate = True
class item(ItemElement):
klass = City
obj_id = Id(Dict('id'))
obj_name = Dict('value')
class WeatherPage(HTMLPage):
@method
class iter_forecast(ListElement):
item_xpath = ('//div[contains(@class, "mid") and contains(@class, "local-weather-forecast")]//'
'tr[@class="meteogram-dates"]/td')
class item(ItemElement):
klass = Forecast
obj_id = CleanText('.//span/@title')
def obj_date(self):
months = [u'tammikuuta', u'helmikuuta', u'maaliskuuta', u'huhtikuuta', u'toukokuuta', u'kesäkuuta',
u'heinäkuuta', u'elokuuta', u'syyskuuta', u'lokakuuta', u'marraskuuta', u'joulukuuta']
d = CleanText('.//span/@title')(self).split()
return date(int(d[2]), months.index(d[1])+1, int(d[0].strip(".")))
def temperatures(self):
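                # Added comment: the meteogram table lays the days out side by
                # side, so this day's columns are located by summing the colspans
                # of the preceding date cells (offset) and taking this cell's own
                # colspan (length).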
offset = int(CleanText('string(sum(./preceding-sibling::td/@colspan))')(self))
length = int(CleanText('@colspan')(self))
temps = CleanText('../../../tbody/tr[@class="meteogram-temperatures"]/td[position() > %d '
'and position() <= %d]/span' % (offset, offset+length))(self)
return [float(_.strip(u'\xb0')) for _ in temps.split()]
def obj_low(self):
return Temperature(min(self.temperatures()), u'C')
def obj_high(self):
return Temperature(max(self.temperatures()), u'C')
def obj_text(self):
offset = int(CleanText('string(sum(./preceding-sibling::td/@colspan))')(self))
length = int(CleanText('@colspan')(self))
hour_test = ('../../tr[@class="meteogram-times"]/td[position() > %d and position() <= %d '
'and .//text() = "%%s"]' % (offset, offset+length))
hour_offset = 'string(count(%s/preceding-sibling::td)+1)' % (hour_test)
values = [
'../../../tbody/tr[@class="meteogram-weather-symbols"]/td[position() = %d]/div/@title',
'../../../tbody/tr[@class="meteogram-apparent-temperatures"]/td[position() = %d]/div/@title',
'../../../tbody/tr[@class="meteogram-wind-symbols"]/td[position() = %d]/div/@title',
'../../../tbody/tr[@class="meteogram-probabilities-of-precipitation"]/td[position() = %d]' +
'/div/@title',
'../../../tbody/tr[@class="meteogram-hourly-precipitation-values"]/td[position() = %d]/span/@title',
]
def descriptive_text_for_hour(hour):
hour_exists = CleanText(hour_test % hour)(self) == hour
if hour_exists:
offset = int(CleanText(hour_offset % hour)(self))
def info_for_value(value):
return CleanText(value % offset)(self).replace(u'edeltävän tunnin ', u'')
return ("klo %s: " % hour) + ", ".join(ifilter(bool, imap(info_for_value, values)))
return u'\n' + u'\n'.join(ifilter(bool, imap(descriptive_text_for_hour, ["02", "14"])))
@method
class get_current(ItemElement):
klass = Current
obj_id = date.today()
obj_date = Date(Regexp(CleanText('//table[@class="observation-text"]//span[@class="time-stamp"]'),
r'^(\d+\.\d+.\d+)'))
obj_text = Format(u'%s, %s, %s',
CleanText(u'(//table[@class="observation-text"])//tr[2]/td[2]'),
CleanText(u'(//table[@class="observation-text"])//tr[5]/td[1]'),
CleanText(u'(//table[@class="observation-text"])//tr[4]/td[2]'))
def obj_temp(self):
path = u'//table[@class="observation-text"]//span[@class="parameter-name" and text() = "Lämpötila"]' + \
u'/../span[@class="parameter-value"]'
temp = CleanDecimal(Regexp(CleanText(path), r'^([^ \xa0]+)'), replace_dots=True)(self)
unit = Regexp(CleanText(path), r'\xb0(\w)')(self)
return Temperature(float(temp), unit)
|
dkodnik/Ant | refs/heads/master | addons/account_analytic_analysis/res_config.py | 426 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_template_required': fields.boolean("Mandatory use of templates.",
implied_group='account_analytic_analysis.group_template_required',
help="Allows you to set the template field as required when creating an analytic account or a contract."),
}
|
Sodki/ansible-modules-extras | refs/heads/devel | monitoring/pagerduty.py | 132 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- This module will let you create PagerDuty maintenance windows
version_added: "1.2"
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns"
- "Bruce Pennypacker"
requirements:
- PagerDuty API access
options:
state:
description:
- Create a maintenance window or get a list of ongoing windows.
required: true
default: null
choices: [ "running", "started", "ongoing", "absent" ]
aliases: []
name:
description:
- PagerDuty unique subdomain.
required: true
default: null
choices: []
aliases: []
user:
description:
- PagerDuty user ID.
required: true
default: null
choices: []
aliases: []
passwd:
description:
- PagerDuty user password.
required: true
default: null
choices: []
aliases: []
token:
description:
- A pagerduty token, generated on the pagerduty site. Can be used instead of
user/passwd combination.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
requester_id:
description:
- ID of user making the request. Only needed when using a token and creating a maintenance_window.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
service:
description:
- A comma separated list of PagerDuty service IDs.
required: false
default: null
choices: []
aliases: [ services ]
hours:
description:
- Length of maintenance window in hours.
required: false
default: 1
choices: []
aliases: []
minutes:
description:
- Maintenance window in minutes (this is added to the hours).
required: false
default: 0
choices: []
aliases: []
version_added: '1.8'
desc:
description:
- Short description of maintenance window.
required: false
default: Created by Ansible
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES='''
# List ongoing maintenance windows using a user/passwd
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
# List ongoing maintenance windows using a token
- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
# Create a 1 hour maintenance window for service FOO123, using a user/passwd
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
# Create a 5 minute maintenance window for service FOO123, using a token
- pagerduty: name=companyabc
token=xxxxxxxxxxxxxx
hours=0
minutes=5
state=running
service=FOO123
# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
hours=4
desc=deployment
register: pd_window
# Delete the previous maintenance window
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=absent
service={{ pd_window.result.maintenance_window.id }}
'''
import datetime
import base64
def auth_header(user, passwd, token):
if token:
return "Token token=%s" % token
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
return "Basic %s" % auth
def ongoing(module, name, user, passwd, token):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
headers = {"Authorization": auth_header(user, passwd, token)}
response, info = fetch_url(module, url, headers=headers)
if info['status'] != 200:
module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
try:
json_out = json.loads(response.read())
except:
json_out = ""
return False, json_out, False
def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
now = datetime.datetime.utcnow()
later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
headers = {
'Authorization': auth_header(user, passwd, token),
'Content-Type' : 'application/json',
}
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
if requester_id:
request_data['requester_id'] = requester_id
else:
if token:
module.fail_json(msg="requester_id is required when using a token")
data = json.dumps(request_data)
response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
if info['status'] != 200:
module.fail_json(msg="failed to create the window: %s" % info['msg'])
try:
json_out = json.loads(response.read())
except:
json_out = ""
return False, json_out, True
def absent(module, name, user, passwd, token, requester_id, service):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/" + service[0]
headers = {
'Authorization': auth_header(user, passwd, token),
'Content-Type' : 'application/json',
}
request_data = {}
if requester_id:
request_data['requester_id'] = requester_id
else:
if token:
module.fail_json(msg="requester_id is required when using a token")
data = json.dumps(request_data)
response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE')
if info['status'] != 200:
module.fail_json(msg="failed to delete the window: %s" % info['msg'])
try:
json_out = json.loads(response.read())
except:
json_out = ""
return False, json_out, True
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
name=dict(required=True),
user=dict(required=False),
passwd=dict(required=False),
token=dict(required=False),
service=dict(required=False, type='list', aliases=["services"]),
requester_id=dict(required=False),
hours=dict(default='1', required=False),
minutes=dict(default='0', required=False),
desc=dict(default='Created by Ansible', required=False),
validate_certs = dict(default='yes', type='bool'),
)
)
state = module.params['state']
name = module.params['name']
user = module.params['user']
passwd = module.params['passwd']
token = module.params['token']
service = module.params['service']
hours = module.params['hours']
minutes = module.params['minutes']
token = module.params['token']
desc = module.params['desc']
requester_id = module.params['requester_id']
if not token and not (user or passwd):
module.fail_json(msg="neither user and passwd nor token specified")
if state == "running" or state == "started":
if not service:
module.fail_json(msg="service not specified")
(rc, out, changed) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
if rc == 0:
changed=True
if state == "ongoing":
(rc, out, changed) = ongoing(module, name, user, passwd, token)
if state == "absent":
(rc, out, changed) = absent(module, name, user, passwd, token, requester_id, service)
if rc != 0:
module.fail_json(msg="failed", result=out)
module.exit_json(msg="success", result=out, changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
margaritis/iTerm2 | refs/heads/master | tools/ply/ply-3.4/test/lex_rule2.py | 174 | # lex_rule2.py
#
# Rule function with incorrect number of arguments
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = [
"PLUS",
"MINUS",
"NUMBER",
]
t_PLUS = r'\+'
t_MINUS = r'-'
def t_NUMBER():
r'\d+'
return t
def t_error(t):
pass
lex.lex()
|
JamesClough/networkx | refs/heads/inverse_line_graph | networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py | 7 | #!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
from nose.plugins.attrib import attr
from networkx import edge_current_flow_betweenness_centrality \
as edge_current_flow
from networkx import edge_current_flow_betweenness_centrality_subset \
as edge_current_flow_subset
class TestFlowBetweennessCentrality(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
try:
import numpy as np
import scipy
except ImportError:
raise SkipTest('NumPy not available.')
def test_K4_normalized(self):
"""Betweenness centrality: K4"""
G=nx.complete_graph(4)
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True)
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_K4(self):
"""Betweenness centrality: K4"""
G=nx.complete_graph(4)
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True)
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
# test weighted network
G.add_edge(0,1,weight=0.5,other=0.3)
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True,
weight=None)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True)
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True,
weight='other')
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True,weight='other')
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_P4_normalized(self):
"""Betweenness centrality: P4 normalized"""
G=nx.path_graph(4)
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True)
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_P4(self):
"""Betweenness centrality: P4"""
G=nx.path_graph(4)
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True)
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_star(self):
"""Betweenness centrality: star """
G=nx.Graph()
nx.add_star(G, ['a', 'b', 'c', 'd'])
b=nx.current_flow_betweenness_centrality_subset(G,
list(G),
list(G),
normalized=True)
b_answer=nx.current_flow_betweenness_centrality(G,normalized=True)
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
# class TestWeightedFlowBetweennessCentrality():
# pass
class TestEdgeFlowBetweennessCentrality(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
try:
import numpy as np
import scipy
except ImportError:
raise SkipTest('NumPy not available.')
def test_K4_normalized(self):
"""Betweenness centrality: K4"""
G=nx.complete_graph(4)
b=edge_current_flow_subset(G,list(G),list(G),normalized=True)
b_answer=edge_current_flow(G,normalized=True)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
def test_K4(self):
"""Betweenness centrality: K4"""
G=nx.complete_graph(4)
b=edge_current_flow_subset(G,list(G),list(G),normalized=False)
b_answer=edge_current_flow(G,normalized=False)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
# test weighted network
G.add_edge(0,1,weight=0.5,other=0.3)
b=edge_current_flow_subset(G,list(G),list(G),normalized=False,weight=None)
# weight is None => same as unweighted network
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
b=edge_current_flow_subset(G,list(G),list(G),normalized=False)
b_answer=edge_current_flow(G,normalized=False)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
b=edge_current_flow_subset(G,list(G),list(G),normalized=False,weight='other')
b_answer=edge_current_flow(G,normalized=False,weight='other')
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
def test_C4(self):
"""Edge betweenness centrality: C4"""
G=nx.cycle_graph(4)
b=edge_current_flow_subset(G,list(G),list(G),normalized=True)
b_answer=edge_current_flow(G,normalized=True)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
def test_P4(self):
"""Edge betweenness centrality: P4"""
G=nx.path_graph(4)
b=edge_current_flow_subset(G, list(G), list(G), normalized=True)
b_answer=edge_current_flow(G,normalized=True)
for (s,t),v1 in b_answer.items():
v2=b.get((s,t),b.get((t,s)))
assert_almost_equal(v1,v2)
|
numba/numba | refs/heads/master | numba/misc/help/inspector.py | 4 | """
This file contains functions to inspect Numba's support for a given Python
module or a Python package. It also contains a `__main__` block so that it can
be run as a command-line tool.
"""
import argparse
import pkgutil
import warnings
import types as pytypes
from numba.core import errors
from numba._version import get_versions
from numba.core.registry import cpu_target
from numba.tests.support import captured_stdout
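# Hedged usage sketch (added for illustration, not part of the original module):
# assuming this file is importable as numba.misc.help.inspector, it can be run
# roughly as
#   python -m numba.misc.help.inspector --format=rst --file=numpy_support numpy
# to write an RST listing of Numba's support for the `numpy` package.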
def _get_commit():
full = get_versions()['full'].split('.')[0]
if not full:
warnings.warn(
"Cannot find git commit hash. Source links could be inaccurate.",
category=errors.NumbaWarning,
)
return 'master'
return full
commit = _get_commit()
github_url = 'https://github.com/numba/numba/blob/{commit}/{path}#L{firstline}-L{lastline}' # noqa: E501
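# For illustration (hypothetical values), the template above expands as:
#   github_url.format(commit='master', path='numba/foo.py', firstline=10, lastline=20)
# which yields 'https://github.com/numba/numba/blob/master/numba/foo.py#L10-L20'.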
def inspect_function(function, target=None):
"""Return information about the support of a function.
Returns
-------
info : dict
Defined keys:
- "numba_type": str or None
The numba type object of the function if supported.
- "explained": str
A textual description of the support.
- "source_infos": dict
A dictionary containing the source location of each definition.
"""
target = target or cpu_target
tyct = target.typing_context
# Make sure we have loaded all extensions
tyct.refresh()
target.target_context.refresh()
info = {}
# Try getting the function type
source_infos = {}
try:
nbty = tyct.resolve_value_type(function)
except ValueError:
nbty = None
explained = 'not supported'
else:
# Make a longer explanation of the type
explained = tyct.explain_function_type(nbty)
for temp in nbty.templates:
try:
source_infos[temp] = temp.get_source_info()
except AttributeError:
source_infos[temp] = None
info['numba_type'] = nbty
info['explained'] = explained
info['source_infos'] = source_infos
return info
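# Illustrative (hypothetical) use of inspect_function, based on the docstring
# above; the exact "explained" text depends on the Numba version and which
# typing extensions are loaded:
#   info = inspect_function(len)
#   print(info['explained'])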
def inspect_module(module, target=None, alias=None):
    """Inspect a module object, yielding results from `inspect_function()`
for each function object in the module.
"""
alias = {} if alias is None else alias
# Walk the module
for name in dir(module):
if name.startswith('_'):
# Skip
continue
obj = getattr(module, name)
supported_types = (pytypes.FunctionType, pytypes.BuiltinFunctionType)
if not isinstance(obj, supported_types):
# Skip if it's not a function
continue
info = dict(module=module, name=name, obj=obj)
if obj in alias:
info['alias'] = alias[obj]
else:
alias[obj] = "{module}.{name}".format(module=module.__name__,
name=name)
info.update(inspect_function(obj, target=target))
yield info
class _Stat(object):
    """For gathering simple statistics on (un)supported functions"""
def __init__(self):
self.supported = 0
self.unsupported = 0
@property
def total(self):
total = self.supported + self.unsupported
return total
@property
def ratio(self):
ratio = self.supported / self.total * 100
return ratio
def describe(self):
if self.total == 0:
return "empty"
return "supported = {supported} / {total} = {ratio:.2f}%".format(
supported=self.supported,
total=self.total,
ratio=self.ratio,
)
def __repr__(self):
return "{clsname}({describe})".format(
clsname=self.__class__.__name__,
describe=self.describe(),
)
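# Illustrative behaviour (added note, not in the original source): with
# supported=3 and unsupported=1, describe() returns "supported = 3 / 4 = 75.00%".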
def filter_private_module(module_components):
return not any(x.startswith('_') for x in module_components)
def filter_tests_module(module_components):
return not any(x == 'tests' for x in module_components)
_default_module_filters = (
filter_private_module,
filter_tests_module,
)
def list_modules_in_package(package, module_filters=_default_module_filters):
"""Yield all modules in a given package.
Recursively walks the package tree.
"""
onerror_ignore = lambda _: None
prefix = package.__name__ + "."
package_walker = pkgutil.walk_packages(
package.__path__,
prefix,
onerror=onerror_ignore,
)
def check_filter(modname):
module_components = modname.split('.')
return any(not filter_fn(module_components)
for filter_fn in module_filters)
modname = package.__name__
if not check_filter(modname):
yield package
for pkginfo in package_walker:
modname = pkginfo[1]
if check_filter(modname):
continue
# In case importing of the module print to stdout
with captured_stdout():
try:
# Import the module
mod = __import__(modname)
except Exception:
continue
# Extract the module
for part in modname.split('.')[1:]:
try:
mod = getattr(mod, part)
except AttributeError:
# Suppress error in getting the attribute
mod = None
break
# Ignore if mod is not a module
if not isinstance(mod, pytypes.ModuleType):
# Skip non-module
continue
yield mod
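# Illustrative use (hypothetical): for an already imported package `scipy`,
# list(list_modules_in_package(scipy)) would yield module objects for scipy and
# its importable submodules, skipping private and test modules per the default
# filters above.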
class Formatter(object):
"""Base class for formatters.
"""
def __init__(self, fileobj):
self._fileobj = fileobj
def print(self, *args, **kwargs):
kwargs.setdefault('file', self._fileobj)
print(*args, **kwargs)
class HTMLFormatter(Formatter):
"""Formatter that outputs HTML
"""
def escape(self, text):
import html
return html.escape(text)
def title(self, text):
self.print('<h1>', text, '</h2>')
def begin_module_section(self, modname):
self.print('<h2>', modname, '</h2>')
self.print('<ul>')
def end_module_section(self):
self.print('</ul>')
def write_supported_item(self, modname, itemname, typename, explained,
sources, alias):
self.print('<li>')
self.print('{}.<b>{}</b>'.format(
modname,
itemname,
))
self.print(': <b>{}</b>'.format(typename))
self.print('<div><pre>', explained, '</pre></div>')
self.print("<ul>")
for tcls, source in sources.items():
if source:
self.print("<li>")
impl = source['name']
sig = source['sig']
filename = source['filename']
lines = source['lines']
self.print(
"<p>defined by <b>{}</b>{} at {}:{}-{}</p>".format(
self.escape(impl), self.escape(sig),
self.escape(filename), lines[0], lines[1],
),
)
self.print('<p>{}</p>'.format(
self.escape(source['docstring'] or '')
))
else:
self.print("<li>{}".format(self.escape(str(tcls))))
self.print("</li>")
self.print("</ul>")
self.print('</li>')
def write_unsupported_item(self, modname, itemname):
self.print('<li>')
self.print('{}.<b>{}</b>: UNSUPPORTED'.format(
modname,
itemname,
))
self.print('</li>')
def write_statistic(self, stats):
self.print('<p>{}</p>'.format(stats.describe()))
class ReSTFormatter(Formatter):
    """Formatter that outputs ReSTructured text format for Sphinx docs.
"""
def escape(self, text):
return text
def title(self, text):
self.print(text)
self.print('=' * len(text))
self.print()
def begin_module_section(self, modname):
self.print(modname)
self.print('-' * len(modname))
self.print()
def end_module_section(self):
self.print()
def write_supported_item(self, modname, itemname, typename, explained,
sources, alias):
self.print('.. function:: {}.{}'.format(modname, itemname))
self.print(' :noindex:')
self.print()
if alias:
self.print(" Alias to: ``{}``".format(alias))
self.print()
for tcls, source in sources.items():
if source:
impl = source['name']
sig = source['sig']
filename = source['filename']
lines = source['lines']
source_link = github_url.format(
commit=commit,
path=filename,
firstline=lines[0],
lastline=lines[1],
)
self.print(
" - defined by ``{}{}`` at `{}:{}-{} <{}>`_".format(
impl, sig, filename, lines[0], lines[1], source_link,
),
)
else:
self.print(" - defined by ``{}``".format(str(tcls)))
self.print()
def write_unsupported_item(self, modname, itemname):
pass
def write_statistic(self, stat):
if stat.supported == 0:
self.print("This module is not supported.")
else:
msg = "Not showing {} unsupported functions."
self.print(msg.format(stat.unsupported))
self.print()
self.print(stat.describe())
self.print()
def _format_module_infos(formatter, package_name, mod_sequence, target=None):
"""Format modules.
"""
formatter.title('Listings for {}'.format(package_name))
alias_map = {} # remember object seen to track alias
for mod in mod_sequence:
stat = _Stat()
modname = mod.__name__
formatter.begin_module_section(formatter.escape(modname))
for info in inspect_module(mod, target=target, alias=alias_map):
nbtype = info['numba_type']
if nbtype is not None:
stat.supported += 1
formatter.write_supported_item(
modname=formatter.escape(info['module'].__name__),
itemname=formatter.escape(info['name']),
typename=formatter.escape(str(nbtype)),
explained=formatter.escape(info['explained']),
sources=info['source_infos'],
alias=info.get('alias'),
)
else:
stat.unsupported += 1
formatter.write_unsupported_item(
modname=formatter.escape(info['module'].__name__),
itemname=formatter.escape(info['name']),
)
formatter.write_statistic(stat)
formatter.end_module_section()
def write_listings(package_name, filename, output_format):
"""Write listing information into a file.
Parameters
----------
package_name : str
Name of the package to inspect.
filename : str
Output filename. Always overwrite.
output_format : str
Support formats are "html" and "rst".
"""
package = __import__(package_name)
if hasattr(package, '__path__'):
mods = list_modules_in_package(package)
else:
mods = [package]
if output_format == 'html':
with open(filename + '.html', 'w') as fout:
fmtr = HTMLFormatter(fileobj=fout)
_format_module_infos(fmtr, package_name, mods)
elif output_format == 'rst':
with open(filename + '.rst', 'w') as fout:
fmtr = ReSTFormatter(fileobj=fout)
_format_module_infos(fmtr, package_name, mods)
else:
raise ValueError(
"Output format '{}' is not supported".format(output_format))
program_description = """
Inspect Numba support for a given top-level package.
""".strip()
def main():
parser = argparse.ArgumentParser(description=program_description)
parser.add_argument(
'package', metavar='package', type=str,
help='Package to inspect',
)
parser.add_argument(
'--format', dest='format', default='html',
help='Output format; i.e. "html", "rst"',
)
parser.add_argument(
'--file', dest='file', default='inspector_output',
help='Output filename. Defaults to "inspector_output.<format>"',
)
args = parser.parse_args()
package_name = args.package
output_format = args.format
filename = args.file
write_listings(package_name, filename, output_format)
if __name__ == '__main__':
main()
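# Added note: the same listings can be produced programmatically (hypothetical
# arguments shown) by calling write_listings('numpy', 'inspector_output', 'rst').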
|
hydralabs/pyamf | refs/heads/master | doc/tutorials/examples/gateways/appengine/demo/simplejson/__init__.py | 10 | r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> simplejson.loads('1.1', parse_float=decimal.Decimal)
    Decimal('1.1')
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
    >>> simplejson.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson
Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.8.1'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
if __name__ == '__main__':
from simplejson.decoder import JSONDecoder
from simplejson.encoder import JSONEncoder
else:
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
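# A brief illustrative sketch (not part of the original API docs) of the
# ``default`` hook described in the docstring above; the helper name
# ``encode_datetime`` is hypothetical:
#
#   >>> import simplejson, datetime
#   >>> def encode_datetime(obj):
#   ...     if isinstance(obj, datetime.datetime):
#   ...         return obj.isoformat()
#   ...     raise TypeError(repr(obj) + " is not JSON serializable")
#   >>> simplejson.dumps({'when': datetime.datetime(2009, 1, 1)}, default=encode_datetime)
#   '{"when": "2009-01-01T00:00:00"}'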
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
    be specified. Encodings that are not ASCII based (such as UCS-2) are
    not allowed; ``fp`` should instead be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
#
# Compatibility cruft from other libraries
#
def decode(s):
"""
demjson, python-cjson API compatibility hook. Use loads(s) instead.
"""
import warnings
warnings.warn("simplejson.loads(s) should be used instead of decode(s)",
DeprecationWarning)
return loads(s)
def encode(obj):
"""
demjson, python-cjson compatibility hook. Use dumps(s) instead.
"""
import warnings
warnings.warn("simplejson.dumps(s) should be used instead of encode(s)",
DeprecationWarning)
return dumps(obj)
def read(s):
"""
jsonlib, JsonUtils, python-json, json-py API compatibility hook.
Use loads(s) instead.
"""
import warnings
warnings.warn("simplejson.loads(s) should be used instead of read(s)",
DeprecationWarning)
return loads(s)
def write(obj):
"""
jsonlib, JsonUtils, python-json, json-py API compatibility hook.
Use dumps(s) instead.
"""
import warnings
warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
DeprecationWarning)
return dumps(obj)
#
# Pretty printer:
# curl http://mochikit.com/examples/ajax_tables/domains.json | python -msimplejson
#
def main():
import sys
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit("%s [infile [outfile]]" % (sys.argv[0],))
try:
obj = load(infile)
except ValueError, e:
raise SystemExit(e)
dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
|
z2care/sample-code-in-the-cloud | refs/heads/master | secure-chat/tchat.py | 1 | import webapp2
from google.appengine.api import users
from google.appengine.ext import db
import datetime
import cgi
import os
from google.appengine.ext.webapp import template
#START: ChatMessage
class ChatMessage(db.Model):
user = db.StringProperty(required=True)
timestamp = db.DateTimeProperty(auto_now_add=True)
message = db.TextProperty(required=True)
chat = db.StringProperty(required=True)
CHATS = ['main', 'book', 'flame' ]
#END: ChatMessage
#START: UserRoles
class UserRole(db.Model):
name = db.StringProperty(required=True)
role = db.StringProperty(choices=["User", "admin", "privileged"],
default="User")
@staticmethod
def GetUserRole(name):
user_record = db.GqlQuery("SELECT * from UserRole WHERE " +
"name = :1",
name).get()
if user_record != None:
            return user_record.role
else:
return "User"
#END: UserRoles
#START: ValidateRole
def ValidateUserRole(actual, required):
if required == "admin": #<callout id="co.admin-check"/>
return actual == "admin"
elif required == "privileged": #<callout id="co.priv-check"/>
        return actual in ("admin", "privileged")
elif required == "User":
return True
else: #<callout id="co.priv-default"/>
return False
#END: ValidateRole
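# Expected behaviour of ValidateUserRole (illustrative only; outcomes reflect
# the role checks above, not additional rules from the original book example):
#
#   ValidateUserRole("admin", "admin")           -> True
#   ValidateUserRole("privileged", "admin")      -> False
#   ValidateUserRole("admin", "privileged")      -> True
#   ValidateUserRole("privileged", "privileged") -> True
#   ValidateUserRole("User", "privileged")       -> False
#   ValidateUserRole("anything", "User")         -> True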
#START: NewChatRoom
class NewChatRoomHandler(webapp2.RequestHandler):
'''@login_required''' #<callout id="co.login-decorator"/>
#http://djangosnippets.org/snippets/691/
#http://flask.pocoo.org/docs/patterns/viewdecorators/
def get(self):
user = users.get_current_user()
        role = UserRole.GetUserRole(user.nickname()) #<callout id="co.role-check"/>
        if not ValidateUserRole(role, "privileged"):
self.response.headers["Context-Type"] = "text/html"
self.response.write(
"<html><head>\n" +
"<title>Insufficient Privileges</title>\n" +
"</head>\n" +
"<body><h1>Insufficient Privileges</h1>\n" +
"<p> I'm sorry but you aren't allowed to " +
"access this page</p>\n" +
"</body></html>\n")
else:
self.response.headers["Content-Type"] = "text/html"
template_values = {
'title': "MarkCC's AppEngine Chat Room",
}
path = os.path.join(os.path.dirname(__file__), 'new-chat.html')
page = template.render(path, template_values)
self.response.write(page)
#END: NewChatRoom
#START: NewChatRoomPost
class NewChatRoomPostHandler(webapp2.RequestHandler):
'''@login_required'''
def post(self):
user = users.get_current_user()
        role = UserRole.GetUserRole(user.nickname())
        if not ValidateUserRole(role, "privileged"):
self.response.headers["Context-Type"] = "text/html"
self.response.write(
"<html><head><title>Insufficient Privileges</title></head>\n" +
"<body><h1>Insufficient Privileges</h1>\n" +
"<p> I'm sorry but you aren't allowed to access this page</p>\n" +
"</body></html>\n")
else:
newchat = cgi.escape(self.request.get("newchat"))
CreateChat(user, newchat)
self.response.write(
"<html><head><title>Chat Room Created</title></head>\n" +
"<body><h1>Chat Room Created</h1>\n" +
"<p> New chat room %s created.</p>\n"
"</body></html>\n" % newchat)
#END: NewChatRoomPost
#START: GenericChat
class GenericChatPage(webapp2.RequestHandler):
def get(self):
requested_chat = self.request.get("chat", default_value="none")
if requested_chat == "none" or requested_chat not in CHATS:
template_params = {
'title': "Error! Requested chat not found!",
'chatname': requested_chat,
}
error_template = os.path.join(os.path.dirname(__file__), 'error.html')
page = template.render(error_template, template_params)
self.response.write(page)
else:
messages = db.GqlQuery("SELECT * from ChatMessage WHERE chat = :1 "
"ORDER BY time", requested_chat)
template_params = {
'title': "MarkCC's AppEngine Chat Room",
'msg_list': messages,
'chat': requested_chat
}
path = os.path.join(os.path.dirname(__file__), 'multichat.html')
page = template.render(path, template_params)
self.response.write(page)
#END: GenericChat
#START: ChatRoomCounted
class ChatRoomCountedHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"] = "text/html"
messages = db.GqlQuery("SELECT * From ChatMessage ORDER BY time "
"DESC LIMIT 20")
            msglist = list(messages)
            msglist.reverse()
for msg in msglist:
msg.deltatime = datetime.datetime.now() - msg.timestamp
template_values = {
'title': "MarkCC's AppEngine Chat Room",
'msg_list': messages,
}
path = os.path.join(os.path.dirname(__file__), 'count.html')
page = template.render(path, template_values)
self.response.write(page)
#END: ChatRoomCounted
#START: LandingPage
class ChatRoomLandingPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"] = "text/html"
messages = db.GqlQuery("SELECT * From ChatMessage ORDER BY timestamp "
"DESC LIMIT 20")
# msglist = list(messages).reverse()
template_values = {
'title': "MarkCC's AppEngine Chat Room",
'msg_list': messages,
}
path = os.path.join(os.path.dirname(__file__), 'landing.html')
page = template.render(path, template_values)
self.response.write(page)
#END: LandingPage
#START: ChatRoomPoster
class ChatRoomPoster(webapp2.RequestHandler):
def post(self):
user = users.get_current_user()
msgtext = self.request.get("message")
chat = self.request.get("chat")
msg = ChatMessage(user=user.nickname(), message=msgtext, chat=chat)
msg.put()
# Now that we've added the message to the chat, we'll redirect
# to the root page,
        self.redirect('/enterchat?chat=%s' % chat)
#END: ChatRoomPoster
# START: Frame
chatapp = webapp2.WSGIApplication([('/', ChatRoomLandingPage),
('/talk', ChatRoomPoster),
('/enterchat', GenericChatPage)])
# END: Frame
|
ericpre/hyperspy | refs/heads/RELEASE_next_minor | hyperspy/tests/io/test_bruker.py | 2 | import json
import os
import numpy as np
import pytest
from hyperspy import signals
from hyperspy.io import load
from hyperspy.misc.test_utils import assert_deep_almost_equal
test_files = ['30x30_instructively_packed_16bit_compressed.bcf',
'16x16_12bit_packed_8bit.bcf',
'P45_the_default_job.bcf',
'test_TEM.bcf',
'Hitachi_TM3030Plus.bcf',
'over16bit.bcf',
'bcf_v2_50x50px.bcf',
'bcf-edx-ebsd.bcf']
np_file = ['30x30_16bit.npy', '30x30_16bit_ds.npy']
spx_files = ['extracted_from_bcf.spx',
'bruker_nano.spx']
my_path = os.path.dirname(__file__)
def test_load_16bit():
# test bcf from hyperspy load function level
# some of functions can be not covered
# it cant use cython parsing implementation, as it is not compiled
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing bcf instructively packed 16bit...')
s = load(filename)
bse, hype = s
# Bruker saves all images in true 16bit:
assert bse.data.dtype == np.uint16
assert bse.data.shape == (30, 30)
np_filename = os.path.join(my_path, 'bruker_data', np_file[0])
np.testing.assert_array_equal(hype.data[:, :, 222:224],
np.load(np_filename))
assert hype.data.shape == (30, 30, 2048)
def test_load_16bit_reduced():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing downsampled 16bit bcf...')
s = load(filename, downsample=4, cutoff_at_kV=10)
bse, hype = s
# sem images are never downsampled
assert bse.data.shape == (30, 30)
np_filename = os.path.join(my_path, 'bruker_data', np_file[1])
np.testing.assert_array_equal(hype.data[:, :, 222:224],
np.load(np_filename))
assert hype.data.shape == (8, 8, 1047)
# Bruker saves all images in true 16bit:
assert bse.data.dtype == np.uint16
# hypermaps should always return unsigned integers:
assert str(hype.data.dtype)[0] == 'u'
def test_load_8bit():
for bcffile in test_files[1:3]:
filename = os.path.join(my_path, 'bruker_data', bcffile)
print('testing simple 8bit bcf...')
s = load(filename)
bse, hype = s[0], s[-1]
# Bruker saves all images in true 16bit:
assert bse.data.dtype == np.uint16
# hypermaps should always return unsigned integers:
assert str(hype.data.dtype)[0] == 'u'
def test_hyperspy_wrap():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing bcf wrap to hyperspy signal...')
from hyperspy.exceptions import VisibleDeprecationWarning
with pytest.warns(VisibleDeprecationWarning):
hype = load(filename, select_type='spectrum')
hype = load(filename, select_type='spectrum_image')
np.testing.assert_allclose(
hype.axes_manager[0].scale,
1.66740910949362,
atol=1E-12)
np.testing.assert_allclose(
hype.axes_manager[1].scale,
1.66740910949362,
atol=1E-12)
assert hype.axes_manager[1].units == 'µm'
np.testing.assert_allclose(hype.axes_manager[2].scale, 0.009999)
np.testing.assert_allclose(hype.axes_manager[2].offset, -0.47225277)
assert hype.axes_manager[2].units == 'keV'
assert hype.axes_manager[2].is_binned == True
md_ref = {
'Acquisition_instrument': {
'SEM': {
'beam_energy': 20,
'magnification': 1819.22595,
'Detector': {
'EDS': {
'elevation_angle': 35.0,
'detector_type': 'XFlash 6|10',
'azimuth_angle': 90.0,
'real_time': 70.07298,
'energy_resolution_MnKa': 130.0}},
'Stage': {
'tilt_alpha': 0.0,
'rotation': 326.10089,
'x': 66940.81,
'y': 54233.16,
'z': 39194.77}}},
'General': {
'original_filename':
'30x30_instructively_packed_16bit_compressed.bcf',
'title': 'EDX',
'date': '2018-10-04',
'time': '13:02:07'},
'Sample': {
'name': 'chevkinite',
'elements': ['Al', 'C', 'Ca', 'Ce', 'Fe', 'Gd', 'K', 'Mg', 'Na',
'Nd', 'O', 'P', 'Si', 'Sm', 'Th', 'Ti'],
'xray_lines': ['Al_Ka', 'C_Ka', 'Ca_Ka', 'Ce_La', 'Fe_Ka',
'Gd_La', 'K_Ka', 'Mg_Ka', 'Na_Ka', 'Nd_La',
'O_Ka', 'P_Ka', 'Si_Ka', 'Sm_La', 'Th_Ma',
'Ti_Ka']},
'Signal': {
'quantity': 'X-rays (Counts)',
'signal_type': 'EDS_SEM'},
'_HyperSpy': {
'Folding': {'original_axes_manager': None,
'original_shape': None,
'signal_unfolded': False,
'unfolded': False}}}
filename_omd = os.path.join(my_path,
'bruker_data',
'30x30_original_metadata.json')
with open(filename_omd) as fn:
# original_metadata:
omd_ref = json.load(fn)
assert_deep_almost_equal(hype.metadata.as_dictionary(), md_ref)
assert_deep_almost_equal(hype.original_metadata.as_dictionary(), omd_ref)
assert hype.metadata.General.date == "2018-10-04"
assert hype.metadata.General.time == "13:02:07"
assert hype.metadata.Signal.quantity == "X-rays (Counts)"
def test_hyperspy_wrap_downsampled():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
print('testing bcf wrap to hyperspy signal...')
hype = load(filename, select_type='spectrum_image', downsample=5)
np.testing.assert_allclose(
hype.axes_manager[0].scale,
8.337045547468101,
atol=1E-12)
np.testing.assert_allclose(
hype.axes_manager[1].scale,
8.337045547468101,
atol=1E-12)
assert hype.axes_manager[1].units == 'µm'
def test_get_mode():
filename = os.path.join(my_path, 'bruker_data', test_files[0])
s = load(filename, select_type='spectrum_image', instrument='SEM')
assert s.metadata.Signal.signal_type == "EDS_SEM"
assert isinstance(s, signals.EDSSEMSpectrum)
filename = os.path.join(my_path, 'bruker_data', test_files[0])
s = load(filename, select_type='spectrum_image', instrument='TEM')
assert s.metadata.Signal.signal_type == "EDS_TEM"
assert isinstance(s, signals.EDSTEMSpectrum)
filename = os.path.join(my_path, 'bruker_data', test_files[0])
s = load(filename, select_type='spectrum_image')
assert s.metadata.Signal.signal_type == "EDS_SEM"
assert isinstance(s, signals.EDSSEMSpectrum)
filename = os.path.join(my_path, 'bruker_data', test_files[3])
s = load(filename, select_type='spectrum_image')
assert s.metadata.Signal.signal_type == "EDS_TEM"
assert isinstance(s, signals.EDSTEMSpectrum)
def test_wrong_file():
filename = os.path.join(my_path, 'bruker_data', 'Nope.bcf')
with pytest.raises(TypeError):
load(filename)
def test_fast_bcf():
thingy = pytest.importorskip("hyperspy.io_plugins.unbcf_fast")
from hyperspy.io_plugins import bruker
for bcffile in test_files:
filename = os.path.join(my_path, 'bruker_data', bcffile)
thingy = bruker.BCF_reader(filename)
for j in range(2, 5, 1):
print('downsampling:', j)
bruker.fast_unbcf = True # manually enabling fast parsing
hmap1 = thingy.parse_hypermap(downsample=j) # using cython
bruker.fast_unbcf = False # manually disabling fast parsing
hmap2 = thingy.parse_hypermap(downsample=j) # py implementation
np.testing.assert_array_equal(hmap1, hmap2)
def test_decimal_regex():
from hyperspy.io_plugins.bruker import fix_dec_patterns
dummy_xml_positive = [b'<dummy_tag>85,658</dummy_tag>',
b'<dummy_tag>85,658E-8</dummy_tag>',
b'<dummy_tag>-85,658E-8</dummy_tag>',
b'<dum_tag>-85.658</dum_tag>', # negative check
b'<dum_tag>85.658E-8</dum_tag>'] # negative check
dummy_xml_negative = [b'<dum_tag>12,25,23,45,56,12,45</dum_tag>',
b'<dum_tag>12e1,23,-24E-5</dum_tag>']
for i in dummy_xml_positive:
assert b'85.658' in fix_dec_patterns.sub(b'\\1.\\2', i)
for j in dummy_xml_negative:
assert b'.' not in fix_dec_patterns.sub(b'\\1.\\2', j)
def test_all_spx_loads():
for spxfile in spx_files:
filename = os.path.join(my_path, 'bruker_data', spxfile)
s = load(filename)
assert s.data.dtype == np.uint64
assert s.metadata.Signal.signal_type == 'EDS_SEM'
def test_stand_alone_spx():
filename = os.path.join(my_path, 'bruker_data', 'bruker_nano.spx')
s = load(filename)
assert s.metadata.Sample.elements == ['Fe', 'S', 'Cu']
assert s.metadata.Acquisition_instrument.SEM.Detector.EDS.live_time == 7.385
def test_bruker_XRF():
# See https://github.com/hyperspy/hyperspy/issues/2689
# Bruker M6 Jetstream SPX
filename = os.path.join(my_path, 'bruker_data',
'bruker_m6_jetstream_file_example.spx')
s = load(filename)
assert s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time == 28.046
assert s.metadata.Acquisition_instrument.TEM.beam_energy == 50
|
ksrajkumar/openerp-6.1 | refs/heads/master | openerp/pychart/afm/Courier_Oblique.py | 15 | # -*- coding: utf-8 -*-
# AFM font Courier-Oblique (path: /usr/share/fonts/afms/adobe/pcrro8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["Courier-Oblique"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 600, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 500, 600, 600, 600, 600, 600, 600, 600, 600, 500, 600, 600, 500, 600, 600, 600, 600, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 600, 500, 600, 500, 500, 500, 500, 600, 600, 600, 600, 500, 500, 500, 500, 500, 600, 500, 500, 500, 600, 500, 500, 600, 600, 600, 600, )
|
40223246/0622W17test2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/browser/indexed_db.py | 632 | class EventListener:
    def __init__(self, events=None):
        # None as the default avoids a mutable default list being shared
        # between EventListener instances.
        self._events = events if events is not None else []
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
raise NotImplementedError("Your browser doesn't support indexedDB")
return
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
#if onerror is None:
def onerror(e):
print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, object, onsuccess=None, onerror=None):
_result=func(object)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, obj, key=None, onsuccess=None, onerror=None):
_r = self._objectStore.put(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
def add(self, obj, key, onsuccess=None, onerror=None):
_r = self._objectStore.add(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
#self._helper(self._objectStore.add, object, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
def onsuccess(event):
cursor=event.target.result
if cursor is not None:
self._data.append(cursor.value)
getattr(cursor,"continue")() # cursor.continue() is illegal
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
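# A minimal usage sketch (illustrative only; the database name "library",
# store name "books" and callbacks below are hypothetical):
#
#   db = IndexedDB()
#
#   def on_upgrade(event):
#       event.target.result.createObjectStore("books")
#
#   def on_open(event):
#       store = db.transaction(["books"], "readwrite").objectStore("books")
#       store.put({"title": "Brython"}, "key1")
#
#   db.open("library", on_open, version=1.0, onupgradeneeded=on_upgrade)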
|
jruiperezv/ANALYSE | refs/heads/master | common/djangoapps/enrollment/urls.py | 8 | """
URLs for the Enrollment API
"""
from django.conf import settings
from django.conf.urls import patterns, url
from .views import get_course_enrollment, list_student_enrollments
urlpatterns = []
if settings.FEATURES.get('ENABLE_COMBINED_LOGIN_REGISTRATION'):
urlpatterns += patterns(
'enrollment.views',
url(r'^student$', list_student_enrollments, name='courseenrollments'),
url(
r'^course/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
get_course_enrollment,
name='courseenrollment'
),
)
|
NobleNoob/buildpack | refs/heads/master | lib/build_pack_utils/cloudfoundry.py | 12 | import os
import sys
import json
import tempfile
import shutil
import utils
import logging
from urlparse import urlparse
from zips import UnzipUtil
from hashes import HashUtil
from cache import DirectoryCacheManager
from downloads import Downloader
from downloads import CurlDownloader
from utils import safe_makedirs
_log = logging.getLogger('cloudfoundry')
class CloudFoundryUtil(object):
@staticmethod
def initialize():
# Open stdout unbuffered
if hasattr(sys.stdout, 'fileno'):
sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
ctx = utils.FormattedDict()
# Add environment variables
ctx.update(os.environ)
# Convert JSON env variables
ctx['VCAP_APPLICATION'] = json.loads(ctx.get('VCAP_APPLICATION',
'{}',
format=False))
ctx['VCAP_SERVICES'] = json.loads(ctx.get('VCAP_SERVICES',
'{}',
format=False))
# Build Pack Location
ctx['BP_DIR'] = os.path.dirname(os.path.dirname(sys.argv[0]))
# User's Application Files, build droplet here
ctx['BUILD_DIR'] = sys.argv[1]
# Cache space for the build pack
ctx['CACHE_DIR'] = (len(sys.argv) == 3) and sys.argv[2] or None
# Temp space
if 'TMPDIR' not in ctx.keys():
ctx['TMPDIR'] = tempfile.gettempdir()
# Make sure cache & build directories exist
if not os.path.exists(ctx['BUILD_DIR']):
os.makedirs(ctx['BUILD_DIR'])
if ctx['CACHE_DIR'] and not os.path.exists(ctx['CACHE_DIR']):
os.makedirs(ctx['CACHE_DIR'])
# Add place holder for extensions
ctx['EXTENSIONS'] = []
# Init Logging
CloudFoundryUtil.init_logging(ctx)
_log.info('CloudFoundry Initialized.')
_log.debug("CloudFoundry Context Setup [%s]", ctx)
return ctx
@staticmethod
def init_logging(ctx):
logFmt = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
if ctx.get('BP_DEBUG', False):
logging.basicConfig(level=logging.DEBUG, format=logFmt)
else:
logLevelStr = ctx.get('BP_LOG_LEVEL', 'INFO')
logLevel = getattr(logging, logLevelStr, logging.INFO)
logDir = os.path.join(ctx['BUILD_DIR'], '.bp', 'logs')
safe_makedirs(logDir)
logging.basicConfig(level=logLevel, format=logFmt,
filename=os.path.join(logDir, 'bp.log'))
@staticmethod
def load_json_config_file_from(folder, cfgFile):
return CloudFoundryUtil.load_json_config_file(os.path.join(folder,
cfgFile))
@staticmethod
def load_json_config_file(cfgPath):
if os.path.exists(cfgPath):
_log.debug("Loading config from [%s]", cfgPath)
with open(cfgPath, 'rt') as cfgFile:
return json.load(cfgFile)
return {}
class CloudFoundryInstaller(object):
def __init__(self, ctx):
self._log = _log
self._ctx = ctx
self._unzipUtil = UnzipUtil(ctx)
self._hashUtil = HashUtil(ctx)
self._dcm = DirectoryCacheManager(ctx)
self._dwn = self._get_downloader(ctx)(ctx)
def _get_downloader(self, ctx):
method = ctx.get('DOWNLOAD_METHOD', 'python')
if method == 'python':
self._log.debug('Using python downloader.')
return Downloader
elif method == 'curl':
self._log.debug('Using cURL downloader.')
return CurlDownloader
elif method == 'custom':
fullClsName = ctx['DOWNLOAD_CLASS']
self._log.debug('Using custom downloader [%s].', fullClsName)
dotLoc = fullClsName.rfind('.')
if dotLoc >= 0:
clsName = fullClsName[dotLoc + 1: len(fullClsName)]
modName = fullClsName[0:dotLoc]
m = __import__(modName, globals(), locals(), [clsName])
try:
return getattr(m, clsName)
except AttributeError:
self._log.exception(
'WARNING: DOWNLOAD_CLASS not found!')
else:
self._log.error(
'WARNING: DOWNLOAD_CLASS invalid, must include '
'package name!')
return Downloader
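    # For example, setting ctx['DOWNLOAD_METHOD'] = 'custom' together with a
    # hypothetical ctx['DOWNLOAD_CLASS'] = 'my_extension.downloads.RetryingDownloader'
    # would make the build pack import my_extension.downloads and use its
    # RetryingDownloader class instead of the built-in downloaders.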
def _is_url(self, val):
return urlparse(val).scheme != ''
def install_binary_direct(self, url, hsh, installDir,
fileName=None, strip=False):
self._log.debug("Installing direct [%s]", url)
if not fileName:
fileName = url.split('/')[-1]
if self._is_url(hsh):
digest = self._dwn.download_direct(hsh)
else:
digest = hsh
self._log.debug(
"Installing [%s] with digest [%s] into [%s] with "
"name [%s] stripping [%s]",
url, digest, installDir, fileName, strip)
fileToInstall = self._dcm.get(fileName, digest)
if fileToInstall is None:
self._log.debug('File [%s] not in cache.', fileName)
fileToInstall = os.path.join(self._ctx['TMPDIR'], fileName)
self._dwn.download(url, fileToInstall)
digest = self._hashUtil.calculate_hash(fileToInstall)
fileToInstall = self._dcm.put(fileName, fileToInstall, digest)
return self._unzipUtil.extract(fileToInstall,
installDir,
strip)
def install_binary(self, installKey):
self._log.debug('Installing [%s]', installKey)
url = self._ctx['%s_DOWNLOAD_URL' % installKey]
hashUrl = self._ctx.get(
'%s_HASH_DOWNLOAD_URL' % installKey,
"%s.%s" % (url, self._ctx['CACHE_HASH_ALGORITHM']))
installDir = os.path.join(self._ctx['BUILD_DIR'],
self._ctx.get(
'%s_PACKAGE_INSTALL_DIR' % installKey,
installKey.lower()))
strip = self._ctx.get('%s_STRIP' % installKey, False)
return self.install_binary_direct(url, hashUrl, installDir,
strip=strip)
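    # A sketch of the context keys a call like install_binary('NGINX') reads
    # (the key prefix and URLs below are illustrative, not real endpoints):
    #
    #   ctx['NGINX_DOWNLOAD_URL'] = 'http://example.com/nginx.tar.gz'
    #   ctx['NGINX_HASH_DOWNLOAD_URL'] = 'http://example.com/nginx.tar.gz.sha1'  # optional
    #   ctx['NGINX_PACKAGE_INSTALL_DIR'] = 'nginx'  # optional, defaults to the key lowercased
    #   ctx['NGINX_STRIP'] = True  # optional, defaults to False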
def _install_from(self, fromPath, fromLoc, toLocation=None, ignore=None):
"""Copy file or directory from a location to the droplet
Copies a file or directory from a location to the application
droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore parameter.
fromPath -> file to copy, relative build pack
fromLoc -> root of the from path. Full path to file or
directory to be copied is fromLoc + fromPath
toLocation -> optional location where to copy the file
relative to app droplet. If not specified
uses fromPath.
ignore -> an optional callable that is passed to
the ignore argument of shutil.copytree.
"""
self._log.debug("Install file [%s] from [%s]", fromPath, fromLoc)
fullPathFrom = os.path.join(fromLoc, fromPath)
if os.path.exists(fullPathFrom):
fullPathTo = os.path.join(
self._ctx['BUILD_DIR'],
((toLocation is None) and fromPath or toLocation))
safe_makedirs(os.path.dirname(fullPathTo))
self._log.debug("Copying [%s] to [%s]", fullPathFrom, fullPathTo)
if os.path.isfile(fullPathFrom):
shutil.copy(fullPathFrom, fullPathTo)
else:
utils.copytree(fullPathFrom, fullPathTo, ignore=ignore)
def install_from_build_pack(self, fromPath, toLocation=None, ignore=None):
"""Copy file or directory from the build pack to the droplet
Copies a file or directory from the build pack to the application
droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore parameter.
fromPath -> file to copy, relative build pack
toLocation -> optional location where to copy the file
relative to app droplet. If not specified
uses fromPath.
ignore -> an optional callable that is passed to
the ignore argument of shutil.copytree.
"""
self._install_from(
fromPath,
self._ctx['BP_DIR'],
toLocation,
ignore)
def install_from_application(self, fromPath, toLocation, ignore=None):
"""Copy file or directory from one place to another in the application
Copies a file or directory from one place to another place within the
application droplet.
fromPath -> file or directory to copy, relative
to application droplet.
toLocation -> location where to copy the file,
relative to app droplet.
ignore -> optional callable that is passed to the
ignore argument of shutil.copytree
"""
self._install_from(
fromPath,
self._ctx['BUILD_DIR'],
toLocation,
ignore)
|
brendangregg/bcc | refs/heads/master | tools/tcpsubnet.py | 4 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpsubnet Summarize TCP bytes sent to different subnets.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpsubnet [-h] [-v] [-J] [-f FORMAT] [-i INTERVAL] [subnets]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# This is an adaptation of tcptop, written by Brendan Gregg.
#
# WARNING: This traces all send at the TCP level, and while it
# summarizes data in-kernel to reduce overhead, there may still be some
# overhead at high TCP send/receive rates (eg, ~13% of one CPU at 100k TCP
# events/sec. This is not the same as packet rate: funccount can be used to
# count the kprobes below to find out the TCP rate). Test in a lab environment
# first. If your send rate is low (eg, <1k/sec) then the overhead is
# expected to be negligible.
#
# Copyright 2017 Rodrigo Manyari
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 03-Oct-2017 Rodrigo Manyari Created this based on tcptop.
# 13-Feb-2018 Rodrigo Manyari Fix pep8 errors, some refactoring.
# 05-Mar-2018 Rodrigo Manyari Add date time to output.
import argparse
import json
import logging
import struct
import socket
from bcc import BPF
from datetime import datetime as dt
from time import sleep
# arguments
examples = """examples:
./tcpsubnet # Trace TCP sent to the default subnets:
# 127.0.0.1/32,10.0.0.0/8,172.16.0.0/12,
# 192.168.0.0/16,0.0.0.0/0
./tcpsubnet -f K # Trace TCP sent to the default subnets
# aggregated in KBytes.
./tcpsubnet 10.80.0.0/24 # Trace TCP sent to 10.80.0.0/24 only
./tcpsubnet -J # Format the output in JSON.
"""
default_subnets = "127.0.0.1/32,10.0.0.0/8," \
"172.16.0.0/12,192.168.0.0/16,0.0.0.0/0"
parser = argparse.ArgumentParser(
description="Summarize TCP send and aggregate by subnet",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("subnets", help="comma separated list of subnets",
type=str, nargs="?", default=default_subnets)
parser.add_argument("-v", "--verbose", action="store_true",
help="output debug statements")
parser.add_argument("-J", "--json", action="store_true",
help="format output in JSON")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-f", "--format", default="B",
help="[bkmBKM] format to report: bits, Kbits, Mbits, bytes, " +
"KBytes, MBytes (default B)", choices=["b", "k", "m", "B", "K", "M"])
parser.add_argument("-i", "--interval", default=1, type=int,
help="output interval, in seconds (default 1)")
args = parser.parse_args()
level = logging.INFO
if args.verbose:
level = logging.DEBUG
logging.basicConfig(level=level)
logging.debug("Starting with the following args:")
logging.debug(args)
# args checking
if int(args.interval) <= 0:
logging.error("Invalid interval, must be > 0. Exiting.")
exit(1)
else:
args.interval = int(args.interval)
# map of supported formats
formats = {
"b": lambda x: (x * 8),
"k": lambda x: ((x * 8) / 1024),
"m": lambda x: ((x * 8) / pow(1024, 2)),
"B": lambda x: x,
"K": lambda x: x / 1024,
"M": lambda x: x / pow(1024, 2)
}
# Let's swap the string with the actual numeric value
# once here so we don't have to do it on every interval
formatFn = formats[args.format]
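# e.g. with "--format K", formatFn(2048) -> 2 (KBytes); with "--format m",
# formatFn(1048576) -> 8 (Mbits). (Illustrative values only.)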
# define the basic structure of the BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
struct index_key_t {
u32 index;
};
BPF_HASH(ipv4_send_bytes, struct index_key_t);
int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
struct msghdr *msg, size_t size)
{
u16 family = sk->__sk_common.skc_family;
if (family == AF_INET) {
u32 dst = sk->__sk_common.skc_daddr;
unsigned categorized = 0;
__SUBNETS__
}
return 0;
}
"""
# Takes in a mask and returns the integer equivalent
# e.g.
# mask_to_int(8) returns 4278190080
def mask_to_int(n):
return ((1 << n) - 1) << (32 - n)
# Takes in a list of subnets and returns a list
# of tuple-3 containing:
# - The subnet info at index 0
# - The addr portion as an int at index 1
# - The mask portion as an int at index 2
#
# e.g.
# parse_subnets([10.10.0.0/24]) returns
# [
# ['10.10.0.0/24', 168427520, 4294967040],
# ]
def parse_subnets(subnets):
m = []
for s in subnets:
parts = s.split("/")
if len(parts) != 2:
msg = "Subnet [%s] is invalid, please refer to the examples." % s
raise ValueError(msg)
netaddr_int = 0
mask_int = 0
try:
netaddr_int = struct.unpack("!I", socket.inet_aton(parts[0]))[0]
except:
msg = ("Invalid net address in subnet [%s], " +
"please refer to the examples.") % s
raise ValueError(msg)
try:
mask_int = int(parts[1])
except:
msg = "Invalid mask in subnet [%s]. Mask must be an int" % s
raise ValueError(msg)
if mask_int < 0 or mask_int > 32:
msg = ("Invalid mask in subnet [%s]. Must be an " +
"int between 0 and 32.") % s
raise ValueError(msg)
mask_int = mask_to_int(int(parts[1]))
m.append([s, netaddr_int, mask_int])
return m
def generate_bpf_subnets(subnets):
template = """
if (!categorized && (__NET_ADDR__ & __NET_MASK__) ==
(dst & __NET_MASK__)) {
struct index_key_t key = {.index = __POS__};
ipv4_send_bytes.increment(key, size);
categorized = 1;
}
"""
bpf = ''
for i, s in enumerate(subnets):
branch = template
branch = branch.replace("__NET_ADDR__", str(socket.htonl(s[1])))
branch = branch.replace("__NET_MASK__", str(socket.htonl(s[2])))
branch = branch.replace("__POS__", str(i))
bpf += branch
return bpf
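# For a single subnet such as 10.10.0.0/24, the generated branch looks roughly
# like the following (constants shown for a little-endian host; illustrative):
#
#   if (!categorized && (2570 & 16777215) == (dst & 16777215)) {
#       struct index_key_t key = {.index = 0};
#       ipv4_send_bytes.increment(key, size);
#       categorized = 1;
#   }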
subnets = []
if args.subnets:
subnets = args.subnets.split(",")
subnets = parse_subnets(subnets)
logging.debug("Packets are going to be categorized in the following subnets:")
logging.debug(subnets)
bpf_subnets = generate_bpf_subnets(subnets)
# initialize BPF
bpf_text = bpf_text.replace("__SUBNETS__", bpf_subnets)
logging.debug("Done preprocessing the BPF program, " +
"this is what will actually get executed:")
logging.debug(bpf_text)
if args.ebpf:
print(bpf_text)
exit()
b = BPF(text=bpf_text)
ipv4_send_bytes = b["ipv4_send_bytes"]
if not args.json:
print("Tracing... Output every %d secs. Hit Ctrl-C to end" % args.interval)
# output
exiting = 0
while (1):
try:
sleep(args.interval)
except KeyboardInterrupt:
exiting = 1
# IPv4: build dict of all seen keys
keys = ipv4_send_bytes
for k, v in ipv4_send_bytes.items():
if k not in keys:
keys[k] = v
# to hold json data
data = {}
# output
now = dt.now()
data['date'] = now.strftime('%x')
data['time'] = now.strftime('%X')
data['entries'] = {}
if not args.json:
print(now.strftime('[%x %X]'))
for k, v in reversed(sorted(keys.items(), key=lambda keys: keys[1].value)):
send_bytes = 0
if k in ipv4_send_bytes:
send_bytes = int(ipv4_send_bytes[k].value)
subnet = subnets[k.index][0]
send = formatFn(send_bytes)
if args.json:
data['entries'][subnet] = send
else:
print("%-21s %6d" % (subnet, send))
if args.json:
print(json.dumps(data))
ipv4_send_bytes.clear()
if exiting:
exit(0)
|
postlund/home-assistant | refs/heads/dev | homeassistant/components/opentherm_gw/__init__.py | 3 | """Support for OpenTherm Gateway devices."""
import asyncio
from datetime import date, datetime
import logging
import pyotgw
import pyotgw.vars as gw_vars
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as COMP_BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as COMP_CLIMATE
from homeassistant.components.sensor import DOMAIN as COMP_SENSOR
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_DATE,
ATTR_ID,
ATTR_MODE,
ATTR_TEMPERATURE,
ATTR_TIME,
CONF_DEVICE,
CONF_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
ATTR_DHW_OVRD,
ATTR_GW_ID,
ATTR_LEVEL,
CONF_CLIMATE,
CONF_FLOOR_TEMP,
CONF_PRECISION,
DATA_GATEWAYS,
DATA_OPENTHERM_GW,
DOMAIN,
SERVICE_RESET_GATEWAY,
SERVICE_SET_CLOCK,
SERVICE_SET_CONTROL_SETPOINT,
SERVICE_SET_GPIO_MODE,
SERVICE_SET_HOT_WATER_OVRD,
SERVICE_SET_LED_MODE,
SERVICE_SET_MAX_MOD,
SERVICE_SET_OAT,
SERVICE_SET_SB_TEMP,
)
_LOGGER = logging.getLogger(__name__)
CLIMATE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
vol.Optional(CONF_FLOOR_TEMP, default=False): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
{
vol.Required(CONF_DEVICE): cv.string,
vol.Optional(CONF_CLIMATE, default={}): CLIMATE_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def options_updated(hass, entry):
"""Handle options update."""
gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
async_dispatcher_send(hass, gateway.options_update_signal, entry)
async def async_setup_entry(hass, config_entry):
"""Set up the OpenTherm Gateway component."""
if DATA_OPENTHERM_GW not in hass.data:
hass.data[DATA_OPENTHERM_GW] = {DATA_GATEWAYS: {}}
gateway = OpenThermGatewayDevice(hass, config_entry)
hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][config_entry.data[CONF_ID]] = gateway
config_entry.add_update_listener(options_updated)
# Schedule directly on the loop to avoid blocking HA startup.
hass.loop.create_task(gateway.connect_and_subscribe())
for comp in [COMP_BINARY_SENSOR, COMP_CLIMATE, COMP_SENSOR]:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, comp)
)
register_services(hass)
return True
async def async_setup(hass, config):
"""Set up the OpenTherm Gateway component."""
if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
conf = config[DOMAIN]
for device_id, device_config in conf.items():
device_config[CONF_ID] = device_id
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=device_config
)
)
return True
def register_services(hass):
"""Register services for the component."""
service_reset_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
)
}
)
service_set_clock_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Optional(ATTR_DATE, default=date.today()): cv.date,
vol.Optional(ATTR_TIME, default=datetime.now().time()): cv.time,
}
)
service_set_control_setpoint_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=90)
),
}
)
service_set_hot_water_ovrd_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_DHW_OVRD): vol.Any(
vol.Equal("A"), vol.All(vol.Coerce(int), vol.Range(min=0, max=1))
),
}
)
service_set_gpio_mode_schema = vol.Schema(
vol.Any(
vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.Equal("A"),
vol.Required(ATTR_MODE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=6)
),
}
),
vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.Equal("B"),
vol.Required(ATTR_MODE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=7)
),
}
),
)
)
service_set_led_mode_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_ID): vol.In("ABCDEF"),
vol.Required(ATTR_MODE): vol.In("RXTBOFHWCEMP"),
}
)
service_set_max_mod_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_LEVEL): vol.All(
vol.Coerce(int), vol.Range(min=-1, max=100)
),
}
)
service_set_oat_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=-40, max=99)
),
}
)
service_set_sb_temp_schema = vol.Schema(
{
vol.Required(ATTR_GW_ID): vol.All(
cv.string, vol.In(hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS])
),
vol.Required(ATTR_TEMPERATURE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=30)
),
}
)
async def reset_gateway(call):
"""Reset the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
mode_rst = gw_vars.OTGW_MODE_RESET
status = await gw_dev.gateway.set_mode(mode_rst)
gw_dev.status = status
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_RESET_GATEWAY, reset_gateway, service_reset_schema
)
async def set_control_setpoint(call):
"""Set the control setpoint on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_CONTROL_SETPOINT
value = await gw_dev.gateway.set_control_setpoint(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_CONTROL_SETPOINT,
set_control_setpoint,
service_set_control_setpoint_schema,
)
async def set_dhw_ovrd(call):
"""Set the domestic hot water override on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.OTGW_DHW_OVRD
value = await gw_dev.gateway.set_hot_water_ovrd(call.data[ATTR_DHW_OVRD])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN,
SERVICE_SET_HOT_WATER_OVRD,
set_dhw_ovrd,
service_set_hot_water_ovrd_schema,
)
async def set_device_clock(call):
"""Set the clock on the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
attr_date = call.data[ATTR_DATE]
attr_time = call.data[ATTR_TIME]
await gw_dev.gateway.set_clock(datetime.combine(attr_date, attr_time))
hass.services.async_register(
DOMAIN, SERVICE_SET_CLOCK, set_device_clock, service_set_clock_schema
)
async def set_gpio_mode(call):
"""Set the OpenTherm Gateway GPIO modes."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gpio_id = call.data[ATTR_ID]
gpio_mode = call.data[ATTR_MODE]
mode = await gw_dev.gateway.set_gpio_mode(gpio_id, gpio_mode)
gpio_var = getattr(gw_vars, f"OTGW_GPIO_{gpio_id}")
gw_dev.status.update({gpio_var: mode})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_GPIO_MODE, set_gpio_mode, service_set_gpio_mode_schema
)
async def set_led_mode(call):
"""Set the OpenTherm Gateway LED modes."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
led_id = call.data[ATTR_ID]
led_mode = call.data[ATTR_MODE]
mode = await gw_dev.gateway.set_led_mode(led_id, led_mode)
led_var = getattr(gw_vars, f"OTGW_LED_{led_id}")
gw_dev.status.update({led_var: mode})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_LED_MODE, set_led_mode, service_set_led_mode_schema
)
async def set_max_mod(call):
"""Set the max modulation level."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_SLAVE_MAX_RELATIVE_MOD
level = call.data[ATTR_LEVEL]
if level == -1:
# Backend only clears setting on non-numeric values.
level = "-"
value = await gw_dev.gateway.set_max_relative_mod(level)
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_MAX_MOD, set_max_mod, service_set_max_mod_schema
)
async def set_outside_temp(call):
"""Provide the outside temperature to the OpenTherm Gateway."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.DATA_OUTSIDE_TEMP
value = await gw_dev.gateway.set_outside_temp(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_OAT, set_outside_temp, service_set_oat_schema
)
async def set_setback_temp(call):
"""Set the OpenTherm Gateway SetBack temperature."""
gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][call.data[ATTR_GW_ID]]
gw_var = gw_vars.OTGW_SB_TEMP
value = await gw_dev.gateway.set_setback_temp(call.data[ATTR_TEMPERATURE])
gw_dev.status.update({gw_var: value})
async_dispatcher_send(hass, gw_dev.update_signal, gw_dev.status)
hass.services.async_register(
DOMAIN, SERVICE_SET_SB_TEMP, set_setback_temp, service_set_sb_temp_schema
)
async def async_unload_entry(hass, entry):
"""Cleanup and disconnect from gateway."""
await asyncio.gather(
hass.config_entries.async_forward_entry_unload(entry, COMP_BINARY_SENSOR),
hass.config_entries.async_forward_entry_unload(entry, COMP_CLIMATE),
hass.config_entries.async_forward_entry_unload(entry, COMP_SENSOR),
)
gateway = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][entry.data[CONF_ID]]
await gateway.cleanup()
return True
class OpenThermGatewayDevice:
"""OpenTherm Gateway device class."""
def __init__(self, hass, config_entry):
"""Initialize the OpenTherm Gateway."""
self.hass = hass
self.device_path = config_entry.data[CONF_DEVICE]
self.gw_id = config_entry.data[CONF_ID]
self.name = config_entry.data[CONF_NAME]
self.climate_config = config_entry.options
self.status = {}
self.update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_update"
self.options_update_signal = f"{DATA_OPENTHERM_GW}_{self.gw_id}_options_update"
self.gateway = pyotgw.pyotgw()
self.gw_version = None
async def cleanup(self, event=None):
"""Reset overrides on the gateway."""
await self.gateway.set_control_setpoint(0)
await self.gateway.set_max_relative_mod("-")
await self.gateway.disconnect()
async def connect_and_subscribe(self):
"""Connect to serial device and subscribe report handler."""
self.status = await self.gateway.connect(self.hass.loop, self.device_path)
_LOGGER.debug("Connected to OpenTherm Gateway at %s", self.device_path)
self.gw_version = self.status.get(gw_vars.OTGW_BUILD)
self.hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, self.cleanup)
async def handle_report(status):
"""Handle reports from the OpenTherm Gateway."""
_LOGGER.debug("Received report: %s", status)
self.status = status
async_dispatcher_send(self.hass, self.update_signal, status)
self.gateway.subscribe(handle_report)
|
sunlianqiang/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/leakers/test_selftype.py | 195 | # Reference cycles involving only the ob_type field are rather uncommon
# but possible. Inspired by SF bug 1469629.
import gc
def leak():
class T(type):
pass
class U(type, metaclass=T):
pass
U.__class__ = U
del U
gc.collect(); gc.collect(); gc.collect()
|
Grogdor/CouchPotatoServer | refs/heads/master | libs/dateutil/__init__.py | 147 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"
|
collmot/ardupilot | refs/heads/master | Tools/scripts/build_binaries_history.py | 18 | #!/usr/bin/env python
from __future__ import print_function
import os
import sqlite3
class BuildBinariesHistory():
def __init__(self, db_filepath):
self.db_filepath = db_filepath
self.assure_db_present()
def progress(self, msg):
print("BBHIST: %s" % msg)
def conn(self):
return sqlite3.connect(self.db_filepath)
def create_schema(self, c):
'''create our tables and whatnot'''
schema_version = 1
c.execute("create table version (version integer)")
c.execute("insert into version (version) values (?)", (schema_version,))
# at some stage we should probably directly associate build with runs....
c.execute("create table build (hash text, tag text, vehicle text, board text, "
"frame text, text integer, data integer, bss integer, start_time real, duration real)")
c.execute("create table run (hash text, tag text, start_time real, duration real)")
c.commit()
def sizes_for_file(self, filepath):
cmd = "size %s" % (filepath,)
stuff = os.popen(cmd).read()
lines = stuff.split("\n")
sizes = lines[1].split("\t")
text = int(sizes[0])
data = int(sizes[1])
bss = int(sizes[2])
self.progress("Binary size of %s:" % filepath)
self.progress("text=%u" % text)
self.progress("data=%u" % data)
self.progress("bss=%u" % bss)
return (text, data, bss)
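    # Illustrative sketch (not part of the original script): the `size` output
    # parsed above is expected to look roughly like
    #     text    data     bss     dec     hex filename
    #    41999     832    1344   44175    ac8f build/firmware.elf
    # i.e. a header row followed by one tab-separated row of numbers; the
    # filename shown here is hypothetical.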
def assure_db_present(self):
c = self.conn()
need_schema_create = False
try:
version_cursor = c.execute("select version from version")
except sqlite3.OperationalError as e:
if "no such table" in str(e): # FIXME: do better here? what's in "e"?
print("need schema create")
need_schema_create = True
if need_schema_create:
self.create_schema(c)
version_cursor = c.execute("select version from version")
version_results = version_cursor.fetchall()
if len(version_results) == 0:
raise IOError("No version number?")
if len(version_results) > 1:
raise IOError("More than one version result?")
first = version_results[0]
want_version = 1
got_version = first[0]
if got_version != want_version:
raise IOError("Bad version number (want=%u got=%u" %
(want_version, got_version))
self.progress("Got history version %u" % got_version)
def record_build(self, hash, tag, vehicle, board, frame, bare_path, start_time, duration):
if bare_path is None:
(text, data, bss) = (None, None, None)
else:
(text, data, bss) = self.sizes_for_file(bare_path)
c = self.conn()
c.execute("replace into build (hash, tag, vehicle, board, frame, text, data, bss, start_time, duration) "
"values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(hash, tag, vehicle, board, frame, text, data, bss, start_time, duration))
c.commit()
def record_run(self, hash, tag, start_time, duration):
c = self.conn()
c.execute("replace into run (hash, tag, start_time, duration) "
"values (?, ?, ?, ?)",
(hash, tag, start_time, duration))
c.commit()
|
gerv/bedrock | refs/heads/master | tests/functional/firefox/desktop/test_all.py | 1 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.firefox.desktop.all import FirefoxDesktopBasePage
@pytest.mark.skip_if_firefox(reason='Download button is not shown for up-to-date Firefox browsers.')
@pytest.mark.nondestructive
@pytest.mark.parametrize(('slug', 'locale'), [
('customize', None),
('fast', 'de'),
('trust', None)])
def test_download_button_is_displayed(slug, locale, base_url, selenium):
locale = locale or 'en-US'
page = FirefoxDesktopBasePage(selenium, base_url, locale, slug=slug).open()
assert page.download_button.is_displayed
|
kerr-huang/SL4A | refs/heads/master | python/src/Lib/glob.py | 173 | """Filename globbing utility."""
import sys
import os
import re
import fnmatch
__all__ = ["glob", "iglob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
return list(iglob(pathname))
def iglob(pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not has_magic(pathname):
if os.path.lexists(pathname):
yield pathname
return
dirname, basename = os.path.split(pathname)
if not dirname:
for name in glob1(os.curdir, basename):
yield name
return
if has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
if has_magic(basename):
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
dirname = os.curdir
if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
names = os.listdir(dirname)
except os.error:
return []
if pattern[0] != '.':
names = filter(lambda x: x[0] != '.', names)
return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None
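# Illustrative usage sketch (not part of the original module; the file names
# are hypothetical):
#     >>> glob('./[0-9].*')       # e.g. ['./1.gif', './2.txt']
#     >>> sorted(iglob('*.py'))   # the same matches, produced lazily
#     >>> glob0('/etc', 'hosts')  # ['hosts'] if /etc/hosts exists, else []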
|
rasata/ansible | refs/heads/devel | lib/ansible/executor/task_queue_manager.py | 9 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import socket
import sys
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
    manager object with shared data structures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._display = display
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._callbacks_loaded = False
self._callback_plugins = []
# make sure the module path (if specified) is parsed and
# added to the module_loader object
if options.module_path is not None:
for path in options.module_path.split(os.pathsep):
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
# create the pool of worker threads, based on the number of forks specified
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
self._workers = []
for i in range(self._options.forks):
main_q = multiprocessing.Queue()
rslt_q = multiprocessing.Queue()
prc = WorkerProcess(self, main_q, rslt_q, loader)
prc.start()
self._workers.append((prc, main_q, rslt_q))
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
inventory hostnames for those hosts triggering the handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
# FIXME: there is a block compile helper for this...
handler_list = []
for handler_block in handlers:
for handler in handler_block.block:
handler_list.append(handler)
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST:
continue
self._callback_plugins.append(callback_plugin(self._display))
else:
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
)
# and run the play using the strategy
return strategy.run(iterator, play_context)
def cleanup(self):
self._display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._result_prc.terminate()
for (worker_prc, main_q, rslt_q) in self._workers:
rslt_q.close()
main_q.close()
worker_prc.terminate()
def clear_failed_hosts(self):
self._failed_hosts = dict()
self._unreachable_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
methods = [
getattr(callback_plugin, method_name, None),
getattr(callback_plugin, 'v2_on_any', None)
]
for method in methods:
if method is not None:
try:
method(*args, **kwargs)
except Exception as e:
self._display.warning('Error when using %s: %s' % (method, str(e)))
|
yumaokao/gdrv | refs/heads/master | gdrv/commands/command_base.py | 1 | #!/usr/bin/python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
import os
import sys
import logging
import fnmatch
import httplib2
from apiclient import errors
from apiclient.discovery import build
from oauth2client.file import Storage
lg = logging.getLogger("BASE")
# lg.setLevel(logging.INFO)
class DriveCommand():
""" A Drive Command Class """
def __init__(self, pconfig):
self.config = pconfig
self.msgout = sys.stdout
@staticmethod
def static_add_sub_command_parser(psub_par):
pass
def __call__(self, args=None):
if args is not None:
self.args = args
self.do_drive_command()
def do_drive_command(self):
pass
# ## base command methods ##
def info(self, *args):
try:
self.msgout.write(*args)
self.msgout.write('\n')
self.msgout.flush()
except UnicodeError:
pass
def info_append(self, *args):
try:
self.msgout.write(*args)
self.msgout.flush()
# self.msgout.write('\n')
except UnicodeError:
pass
def parse_input_string(self, pinstr, pmaxlen):
idxs = []
if pinstr == 'a':
return range(pmaxlen)
for acom in pinstr.split(','):
arange = acom.split('-')
# lg.debug("aidx ")
# lg.debug(arange)
try:
if len(arange) == 1:
aidx = int(arange[0])
idxs.append(aidx)
elif len(arange) == 2:
aidx = int(arange[0])
bidx = int(arange[1])
idxs.extend(range(aidx, bidx + 1))
except ValueError:
pass
# lg.debug("aidx %d bidx %d") % (aidx, bidx)
# ridx = filter(lambda x: x < pmaxlen, idxs)
# lg.debug(ridx)
return set(filter(lambda x: x < pmaxlen, idxs))
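    # Illustrative sketch (not part of the original class): the selection
    # string accepts single indices, ranges and 'a' for "all", e.g.
    #     self.parse_input_string("1,3-5", 10) -> set([1, 3, 4, 5])
    #     self.parse_input_string("a", 4)      -> [0, 1, 2, 3]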
class DriveServiceCommand(DriveCommand):
""" A Drive Service Command Class """
def get_storage(self):
self.storage = Storage(
os.path.expanduser(self.config.get('api', 'storage')))
def get_credentials(self):
self.credentials = None
self.get_storage()
self.credentials = self.storage.get()
def get_service(self):
self.service = None
self.get_credentials()
if self.credentials is None or self.credentials.invalid:
print "Please init oauth2 flow first"
else:
http = httplib2.Http()
http = self.credentials.authorize(http)
self.service = build('drive', 'v2', http=http)
def do_drive_command(self):
self.get_service()
if self.service is not None:
self.do_service_command()
def do_service_command(self):
pass
# ## helper drive apis ##
def find_drive_files(self, psrcdir, pname,
hidedir=False, hidetrashed=True):
matches = []
files = self.get_all_children(psrcdir,
hidedir=hidedir, hidetrashed=hidetrashed)
for afile in files:
if fnmatch.fnmatch(afile['title'], pname):
matches.append(afile)
return matches
def get_all_children(self, psrcdir, hidedir=False, hidetrashed=True):
parentid = self.find_parent_id(psrcdir)
if parentid is None:
lg.error("Can't find directory %s in drive" % psrcdir)
sys.exit("Can't find directory %s in drive" % psrcdir)
query = "'%s' in parents" % parentid
if hidedir is True:
query += " and mimeType != 'application/vnd.google-apps.folder'"
if hidetrashed is True:
query += " and trashed = false"
return self.file_list(query)
def find_parent_id(self, pdir, pmkdir=False):
dirs = pdir.split('/')
parentid = 'root'
# for aidx in range(len(dirs)):
for adir in dirs:
# lg.debug("dirs %s" % (adir))
if adir == '':
continue
children_dirs = self.check_children_dirs(adir, parentid)
dirs_nums = len(children_dirs)
if dirs_nums == 0:
lg.error("Can't find directory %s" % (adir))
return None
elif dirs_nums > 1:
lg.warn("Find %d instances of directory %s" % (
dirs_nums, adir))
parentid = children_dirs[0]['id']
return parentid
def check_children_dirs(self, dirname, parent="root"):
query = "mimeType = 'application/vnd.google-apps.folder'"
query += " and title = '%s'" % dirname
query += " and '%s' in parents" % parent
# lg.debug("query %s" % query)
children_dirs = self.file_list(query)
# for adir in children_dirs:
# lg.debug("children %s id %s" % (adir['title'], adir['id']))
return children_dirs
# ## basic drive apis ##
def file_list(self, query=""):
"""Retrieve a list of File resources.
Args:
          query: optional Drive search query string.
Returns:
List of File resources.
"""
# lg.debug("file_list query %s" % query)
result = []
page_token = None
while True:
try:
param = {}
if query != "":
param['q'] = query
if page_token:
param['pageToken'] = page_token
files = self.service.files().list(**param).execute()
result.extend(files['items'])
page_token = files.get('nextPageToken')
if not page_token:
break
except errors.HttpError, error:
print 'An error occurred: %s' % error
break
return result
def permission_list(self, pfile="root"):
"""Retrieve a list of permissions of the file
Args:
pfile: drive file id
Returns:
list of file permissions
"""
# lg.debug("permission_list query %s" % query)
result = []
page_token = None
while True:
try:
param = {}
if page_token:
param['pageToken'] = page_token
perms = self.service.permissions().list(fileId=pfile).execute()
result.extend(perms['items'])
page_token = perms.get('nextPageToken')
if not page_token:
break
except errors.HttpError, error:
print 'An error occurred: %s' % error
break
return result
# deprecated
def children_list(self, parent="root", query=""):
"""Retrieve a list of File resources.
Args:
parent: parent id or alias 'root'
query: query string
Returns:
List of File resources.
"""
result = []
page_token = None
while True:
try:
param = {}
if query != "":
param['q'] = query
if page_token:
param['pageToken'] = page_token
files = self.service.children().list(
folderId=parent, **param).execute()
result.extend(files['items'])
page_token = files.get('nextPageToken')
if not page_token:
break
except errors.HttpError, error:
print 'An error occurred: %s' % error
break
return result
|
Ahn1/Clinq | refs/heads/master | web/clinq/management/commands/updateindex.py | 1 | import os
import clinq.models as model
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import clinq.management.commands.tagHandler as handler
class Command(BaseCommand):
#option_list = BaseCommand.option_list + (
# make_option('--long', '-l', dest='long',
# help='Help for the long options'),
#)
help = 'Refresh media index'
def handle(self, **options):
path = settings.MEDIA_PATH
print path
self.IndexFolder(path)
def IndexFolder(self, path):
oslist = os.listdir(path)
oslist = [os.path.join(path,f) for f in oslist]
files = [f for f in oslist if os.path.isfile(f)]
dirs = [f for f in oslist if os.path.isdir(f)]
for subdir in dirs:
self.IndexFolder(subdir)
#print subdir
for targetFile in files:
self.IndexFile(targetFile)
def IndexFile(self, path):
try:
fileName, fileExtension = os.path.splitext(path)
relPath = os.path.relpath(path,settings.MEDIA_PATH)
dbObj = None
if model.File.objects.filter(path=relPath).count() == 0:
dbObj = model.File()
dbObj.path = relPath
else:
dbObj = model.File.objects.filter(path=relPath)[:1][0]
lastEditTime = os.stat(path).st_mtime
if dbObj.changeDate < lastEditTime:
dbObj.changeDate = lastEditTime
dbObj.save()
if fileExtension in [".mp3"]:
self.HandleAudioFile(path, dbObj)
else:
print("Skip file '%s'" % path)
except Exception, e:
print e
def HandleAudioFile(self, path, refdbFile):
print "Try to handle {0}".format(path)
fileName, fileExtension = os.path.splitext(path)
tagObject = None
if model.AudioFile.objects.filter(refFile=refdbFile).count() == 0:
tagObject = model.AudioFile()
tagObject.refFile = refdbFile
print "Create new mp3 Tag"
else:
tagObject = model.AudioFile.objects.filter(refFile=refdbFile)[:1][0]
print "Load mp3 Tag"
if fileExtension in handler.audio:
handlerClass = handler.audio[fileExtension]
handlerObj = handlerClass()
handlerObj.UpdateTags(path, tagObject)
print [tagObject]
tagObject.save()
|
bqbn/addons-server | refs/heads/master | src/olympia/addons/api_urls.py | 1 | from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from olympia.activity.views import VersionReviewNotesViewSet
from .views import (
AddonAutoCompleteSearchView, AddonFeaturedView, AddonRecommendationView,
AddonSearchView, AddonVersionViewSet, AddonViewSet, CompatOverrideView,
LanguageToolsView, ReplacementAddonView, StaticCategoryView)
addons = SimpleRouter()
addons.register(r'addon', AddonViewSet, basename='addon')
# Router for children of /addons/addon/{addon_pk}/.
sub_addons = NestedSimpleRouter(addons, r'addon', lookup='addon')
sub_addons.register('versions', AddonVersionViewSet, basename='addon-version')
sub_versions = NestedSimpleRouter(sub_addons, r'versions', lookup='version')
sub_versions.register(r'reviewnotes', VersionReviewNotesViewSet,
basename='version-reviewnotes')
urls = [
url(r'', include(addons.urls)),
url(r'', include(sub_addons.urls)),
url(r'', include(sub_versions.urls)),
url(r'^autocomplete/$', AddonAutoCompleteSearchView.as_view(),
name='addon-autocomplete'),
url(r'^search/$', AddonSearchView.as_view(), name='addon-search'),
url(r'^categories/$', StaticCategoryView.as_view(), name='category-list'),
url(r'^language-tools/$', LanguageToolsView.as_view(),
name='addon-language-tools'),
url(r'^replacement-addon/$', ReplacementAddonView.as_view(),
name='addon-replacement-addon'),
url(r'^recommendations/$', AddonRecommendationView.as_view(),
name='addon-recommendations'),
]
addons_v3 = urls + [
url(r'^compat-override/$', CompatOverrideView.as_view(),
name='addon-compat-override'),
url(r'^featured/$', AddonFeaturedView.as_view(), name='addon-featured'),
]
addons_v4 = urls
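# Rough sketch of the URL layout produced by the routers above (shown for
# orientation only; the exact names follow the basenames registered above):
#   addon/{pk}/                                          -> addon-detail
#   addon/{addon_pk}/versions/{pk}/                      -> addon-version-detail
#   addon/{addon_pk}/versions/{version_pk}/reviewnotes/  -> version-reviewnotes-list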
|
Dubrzr/django-push-notifications | refs/heads/master | tests/test_models.py | 14 | import json
import mock
from django.test import TestCase
from django.utils import timezone
from push_notifications.models import GCMDevice, APNSDevice
from tests.mock_responses import GCM_PLAIN_RESPONSE, \
GCM_MULTIPLE_JSON_RESPONSE, GCM_PLAIN_RESPONSE_ERROR, \
GCM_JSON_RESPONSE_ERROR, GCM_PLAIN_RESPONSE_ERROR_B
from push_notifications.gcm import GCMError
class ModelTestCase(TestCase):
def test_can_save_gcm_device(self):
device = GCMDevice.objects.create(
registration_id="a valid registration id"
)
assert device.id is not None
assert device.date_created is not None
assert device.date_created.date() == timezone.now().date()
def test_can_create_save_device(self):
device = APNSDevice.objects.create(
registration_id="a valid registration id"
)
assert device.id is not None
assert device.date_created is not None
assert device.date_created.date() == timezone.now().date()
def test_gcm_send_message(self):
device = GCMDevice.objects.create(
registration_id="abc",
)
with mock.patch("push_notifications.gcm._gcm_send", return_value=GCM_PLAIN_RESPONSE) as p:
device.send_message("Hello world")
p.assert_called_once_with(
b"data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8")
def test_gcm_send_message_extra(self):
device = GCMDevice.objects.create(
registration_id="abc",
)
with mock.patch("push_notifications.gcm._gcm_send", return_value=GCM_PLAIN_RESPONSE) as p:
device.send_message("Hello world", extra={"foo": "bar"})
p.assert_called_once_with(
b"data.foo=bar&data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8")
def test_gcm_send_message_collapse_key(self):
device = GCMDevice.objects.create(
registration_id="abc",
)
with mock.patch("push_notifications.gcm._gcm_send", return_value=GCM_PLAIN_RESPONSE) as p:
device.send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
b"collapse_key=test_key&data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8")
def test_gcm_send_message_to_multiple_devices(self):
GCMDevice.objects.create(
registration_id="abc",
)
GCMDevice.objects.create(
registration_id="abc1",
)
with mock.patch("push_notifications.gcm._gcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": { "message": "Hello world" },
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_gcm_send_message_extra_to_multiple_devices(self):
GCMDevice.objects.create(
registration_id="abc",
)
GCMDevice.objects.create(
registration_id="abc1",
)
with mock.patch("push_notifications.gcm._gcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE) as p:
GCMDevice.objects.all().send_message("Hello world", extra={"foo": "bar"})
p.assert_called_once_with(
json.dumps({
"data": { "foo": "bar", "message": "Hello world" },
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_gcm_send_message_collapse_to_multiple_devices(self):
GCMDevice.objects.create(
registration_id="abc",
)
GCMDevice.objects.create(
registration_id="abc1",
)
with mock.patch("push_notifications.gcm._gcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE) as p:
GCMDevice.objects.all().send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"data": { "message": "Hello world" },
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_gcm_send_message_to_single_device_with_error(self):
# these errors are device specific, device.active will be set false
device_list = ['abc', 'abc1']
self.create_devices(device_list)
for index, error in enumerate(GCM_PLAIN_RESPONSE_ERROR):
with mock.patch("push_notifications.gcm._gcm_send",
return_value=error) as p:
device = GCMDevice.objects. \
get(registration_id=device_list[index])
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id=device_list[index]) \
.active is False
def test_gcm_send_message_to_single_device_with_error_b(self):
# these errors are not device specific, GCMError should be thrown
device_list = ['abc']
self.create_devices(device_list)
with mock.patch("push_notifications.gcm._gcm_send",
return_value=GCM_PLAIN_RESPONSE_ERROR_B) as p:
device = GCMDevice.objects. \
get(registration_id=device_list[0])
with self.assertRaises(GCMError):
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id=device_list[0]) \
.active is True
def test_gcm_send_message_to_multiple_devices_with_error(self):
device_list = ['abc', 'abc1', 'abc2']
self.create_devices(device_list)
with mock.patch("push_notifications.gcm._gcm_send",
return_value=GCM_JSON_RESPONSE_ERROR) as p:
devices = GCMDevice.objects.all()
devices.send_message("Hello World")
assert GCMDevice.objects.get(registration_id=device_list[0]) \
.active is False
assert GCMDevice.objects.get(registration_id=device_list[1]) \
.active is True
assert GCMDevice.objects.get(registration_id=device_list[2]) \
.active is False
def test_apns_send_message(self):
device = APNSDevice.objects.create(
registration_id="abc",
)
socket = mock.MagicMock()
with mock.patch("push_notifications.apns._apns_pack_frame") as p:
device.send_message("Hello world", socket=socket, expiration=1)
p.assert_called_once_with("abc", b'{"aps":{"alert":"Hello world"}}', 0, 1, 10)
def test_apns_send_message_extra(self):
device = APNSDevice.objects.create(
registration_id="abc",
)
socket = mock.MagicMock()
with mock.patch("push_notifications.apns._apns_pack_frame") as p:
device.send_message("Hello world", extra={"foo": "bar"}, socket=socket, identifier=1, expiration=2, priority=5)
p.assert_called_once_with("abc", b'{"aps":{"alert":"Hello world"},"foo":"bar"}', 1, 2, 5)
def create_devices(self, devices):
for device in devices:
GCMDevice.objects.create(
registration_id=device,
)
|
kthordarson/youtube-dl-ruv | refs/heads/master | youtube_dl/extractor/comedycentral.py | 3 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .mtv import MTVServicesInfoExtractor
from ..utils import (
compat_str,
compat_urllib_parse,
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(InfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow or :colbert
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
|https?://(:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
(?:[?#].*|$)'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
@staticmethod
def _transform_rtmp_url(rtmp_video_url):
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
if not m:
raise ExtractorError('Cannot transform RTMP url')
base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
return base + m.group('finalid')
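    # Illustrative sketch (hypothetical URL): an input such as
    #   rtmpe://viacomccstrm.example.net/ondemand/gsp.comedystor/com/dailyshow/clip.mp4
    # keeps everything from 'gsp.comedystor/' onwards and is re-rooted on the
    # plain-HTTP mtvnmobile.vo.llnwd.net base defined above.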
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix; so extract the alternate
            # reference and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
'format_note': 'HTTP 400 at the moment (patches welcome!)',
'preference': -100,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
})
return {
'_type': 'playlist',
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
|
MaizerGomes/youtube-dl | refs/heads/master | youtube_dl/extractor/collegerama.py | 111 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
int_or_none,
)
class CollegeRamaIE(InfoExtractor):
_VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
_TESTS = [
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
'md5': '481fda1c11f67588c0d9d8fbdced4e39',
'info_dict': {
'id': '585a43626e544bdd97aeb71a0ec907a01d',
'ext': 'mp4',
'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
'description': '',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 7713.088,
'timestamp': 1413309600,
'upload_date': '20141014',
},
},
{
'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
'md5': 'ef1fdded95bdf19b12c5999949419c92',
'info_dict': {
'id': '86a9ea9f53e149079fbdb4202b521ed21d',
'ext': 'wmv',
'title': '64ste Vakantiecursus: Afvalwater',
'description': 'md5:7fd774865cc69d972f542b157c328305',
'duration': 10853,
'timestamp': 1326446400,
'upload_date': '20120113',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
player_options_request = {
"getPlayerOptionsRequest": {
"ResourceId": video_id,
"QueryString": "",
}
}
request = compat_urllib_request.Request(
'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
json.dumps(player_options_request))
request.add_header('Content-Type', 'application/json')
player_options = self._download_json(request, video_id)
presentation = player_options['d']['Presentation']
title = presentation['Title']
description = presentation.get('Description')
thumbnail = None
duration = float_or_none(presentation.get('Duration'), 1000)
timestamp = int_or_none(presentation.get('UnixTime'), 1000)
formats = []
for stream in presentation['Streams']:
for video in stream['VideoUrls']:
thumbnail_url = stream.get('ThumbnailUrl')
if thumbnail_url:
thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
format_id = video['MediaType']
if format_id == 'SS':
continue
formats.append({
'url': video['Location'],
'format_id': format_id,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
}
|
DVSBA/ajenti | refs/heads/master | ajenti/feedback.py | 17 | """
Module for sending usage statistics to ajenti.org
"""
__all__ = ['send_stats', 'check_uid']
import os
import base64
import random
from ajenti.utils import *
from ajenti import version
global uid
uid = ''
def send_stats(server, plugins, addplugin=None, delplugin=None):
"""
Sends usage statistics to the server. Statistics include: OS name, list of
installed plugins and Ajenti version.
:param server: server URL
:type server: str
:param addplugin: plugin being currently installed or None
:type addplugin: str
:param delplugin: plugin being currently removed or None
:type delplugin: str
"""
plugs = []
plugs.extend(plugins)
if addplugin:
plugs.append(addplugin)
if delplugin and delplugin in plugs:
plugs.remove(delplugin)
plugs = ','.join(plugs)
data = '1|%s|%s|%s|,%s,' % (uid, version(), detect_platform(mapping=False), plugs)
data = base64.b64encode(data)
download('http://%s/api/submit?data=%s' % (server, data))
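    # Illustrative sketch (values are hypothetical): the payload assembled
    # above looks like '1|<uid>|1.x|debian|,network,squid,' before it is
    # base64-encoded and appended to the submit URL.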
def check_uid():
"""
Checks that installation UID is present and generates it if it's not.
"""
global uid
file = '/var/lib/ajenti/installation-uid'
if not os.path.exists(file):
uid = str(random.randint(1, 9000*9000))
try:
open(file, 'w').write(uid)
except:
uid = '0'
else:
uid = open(file).read()
|
tshirtman/zine_ad_sense | refs/heads/master | __init__.py | 1 | # -*- coding: utf-8 -*-
"""
zine.plugins.ad_sense
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by gabriel pettier
:license: GPL, see LICENSE for more details.
"""
from os.path import dirname, join
from random import choice
from zine.api import *
from zine.views.admin import render_admin_response
from zine.utils.admin import flash
from zine.utils.http import redirect
from zine.utils.forms import TextField
from zine.config import ConfigurationTransactionError
from zine.privileges import BLOG_ADMIN
TEMPLATES = join(dirname(__file__), 'templates')
def add_ad_sense_link(req, navigation_bar):
if not req.user.has_privilege(BLOG_ADMIN):
return
for link_id, url, title, children in navigation_bar:
if link_id == 'options':
children.insert(-3, ('ad sense', url_for('ad_sense/config'),
_('Ad sense')))
@require_privilege(BLOG_ADMIN)
def view_ad_sense_config(req):
client_code = req.args.get('client_code')
banner_slot = req.args.get('banner_slot')
width = req.args.get('width')
height = req.args.get('height')
if client_code and banner_slot and width and height:
try:
req.app.cfg.change_single('ad_sense/client_code', client_code)
req.app.cfg.change_single('ad_sense/banner_slot', banner_slot)
req.app.cfg.change_single('ad_sense/width', width)
req.app.cfg.change_single('ad_sense/height', height)
flash(_('Config updated!'), 'info')
except ConfigurationTransactionError, e:
flash(_('The code could not be changed.'), 'error')
return redirect(url_for('ad_sense/config'))
return render_admin_response('admin/ad_sense.html',
'config.ad_sense',
client_code=req.app.cfg['ad_sense/client_code'],
banner_slot=req.app.cfg['ad_sense/banner_slot'],
width=req.app.cfg['ad_sense/width'],
height=req.app.cfg['ad_sense/height']
)
def add_adsense_banner(post):
conf = get_application().cfg
client_code = conf['ad_sense/client_code']
banner_slot = conf['ad_sense/banner_slot']
banner_width = conf['ad_sense/width']
banner_height = conf['ad_sense/height']
if choice((True, False)):
return '''
<span class="ad">
<script type="text/javascript"><!--
google_ad_client = "'''+client_code+'''";
google_ad_slot = "'''+banner_slot+'''";
google_ad_width = '''+banner_width+''';
google_ad_height = '''+banner_height+''';
//-->
</script>
<script type="text/javascript"
src="http://pagead2.googlesyndication.com/pagead/show_ads.js">
</script>
</span>
'''
else:
return ''
def insert_header_js(metadata):
metadata.append('''
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-23430110-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
''')
def setup(app, plugin):
"""This function is called by Zine in the application initialization
phase. Here we connect to the events and register our template paths,
url rules, views etc.
"""
app.connect_event('after-entry-rendered', add_adsense_banner)
    # register the configuration variables used to render the AdSense banner
app.add_config_var('ad_sense/client_code', TextField(default=''))
app.add_config_var('ad_sense/banner_slot', TextField(default=''))
app.add_config_var('ad_sense/width', TextField(default=''))
app.add_config_var('ad_sense/height', TextField(default=''))
app.connect_event('modify-admin-navigation-bar', add_ad_sense_link)
app.connect_event('before-metadata-assembled', insert_header_js)
    # add a URL rule for the admin panel page, which lives under Options
app.add_url_rule('/options/ad_sense', prefix='admin',
endpoint='ad_sense/config',
view=view_ad_sense_config)
    # add our templates to the searchpath so that Zine can find the
    # admin panel template for the ad sense config panel.
app.add_template_searchpath(TEMPLATES)
|
jonyroda97/redbot-amigosprovaveis | refs/heads/develop | lib/pip/_vendor/urllib3/contrib/socks.py | 65 | # -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4
- SOCKS4a
- SOCKS5
- Usernames and passwords for the SOCKS proxy
Known Limitations:
- Currently PySocks does not support contacting remote websites via literal
IPv6 addresses. Any such connection attempt will fail. You must use a domain
name.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
such connection attempt will fail.
"""
from __future__ import absolute_import
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn((
'SOCKS support in urllib3 requires the installation of optional '
'dependencies: specifically, PySocks. For more information, see '
'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
),
DependencyWarning
)
raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import (
HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop('_socks_options')
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options['socks_version'],
proxy_addr=self._socks_options['proxy_host'],
proxy_port=self._socks_options['proxy_port'],
proxy_username=self._socks_options['username'],
proxy_password=self._socks_options['password'],
proxy_rdns=self._socks_options['rdns'],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout)
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
'http': SOCKSHTTPConnectionPool,
'https': SOCKSHTTPSConnectionPool,
}
def __init__(self, proxy_url, username=None, password=None,
num_pools=10, headers=None, **connection_pool_kw):
parsed = parse_url(proxy_url)
if username is None and password is None and parsed.auth is not None:
split = parsed.auth.split(':')
if len(split) == 2:
username, password = split
if parsed.scheme == 'socks5':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = False
elif parsed.scheme == 'socks5h':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = True
elif parsed.scheme == 'socks4':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = False
elif parsed.scheme == 'socks4a':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = True
else:
raise ValueError(
"Unable to determine SOCKS version from %s" % proxy_url
)
self.proxy_url = proxy_url
socks_options = {
'socks_version': socks_version,
'proxy_host': parsed.host,
'proxy_port': parsed.port,
'username': username,
'password': password,
'rdns': rdns
}
connection_pool_kw['_socks_options'] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
|
acshan/odoo | refs/heads/8.0 | addons/google_calendar/google_calendar.py | 59 | # -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
def status_response(status, substr=False):
if substr:
return int(str(status)[0])
else:
return status_response(status, substr=True) == 2
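# Illustrative sketch: status_response(200) -> True, status_response(404) -> False,
# and status_response(404, substr=True) -> 4 (the leading digit of the HTTP status).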
class Meta(type):
""" This Meta class allow to define class as a structure, and so instancied variable
in __init__ to avoid to have side effect alike 'static' variable """
def __new__(typ, name, parents, attrs):
methods = dict((k, v) for k, v in attrs.iteritems()
if callable(v))
attrs = dict((k, v) for k, v in attrs.iteritems()
if not callable(v))
def init(self, **kw):
for k, v in attrs.iteritems():
setattr(self, k, v)
for k, v in kw.iteritems():
assert k in attrs
setattr(self, k, v)
methods['__init__'] = init
methods['__getitem__'] = getattr
return type.__new__(typ, name, parents, methods)
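# Illustrative sketch: each Struct subclass below acts as a small mutable record,
# e.g. OpenerpEvent(found=True) yields an instance whose 'found' is True while the
# other declared fields keep their per-instance defaults (False).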
class Struct(object):
__metaclass__ = Meta
class OpenerpEvent(Struct):
event = False
found = False
event_id = False
isRecurrence = False
isInstance = False
update = False
status = False
attendee_id = False
synchro = False
class GmailEvent(Struct):
event = False
found = False
isRecurrence = False
isInstance = False
update = False
status = False
class SyncEvent(object):
def __init__(self):
self.OE = OpenerpEvent()
self.GG = GmailEvent()
self.OP = None
def __getitem__(self, key):
return getattr(self, key)
def compute_OP(self, modeFull=True):
        # If the event already exists both in Gmail and in OpenERP
if self.OE.found and self.GG.found:
is_owner = self.OE.event.env.user.id == self.OE.event.user_id.id
            # If the event has been deleted on one side, delete it on the other side too!
if self.OE.status != self.GG.status and is_owner:
self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
'The event has been deleted from one side, we delete on other side !')
            # If the event has not been deleted
elif self.OE.status and (self.GG.status or not is_owner):
if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
if self.OE.update < self.GG.update:
tmpSrc = 'GG'
elif self.OE.update > self.GG.update:
tmpSrc = 'OE'
assert tmpSrc in ['GG', 'OE']
#if self.OP.action == None:
if self[tmpSrc].isRecurrence:
if self[tmpSrc].status:
self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
else:
self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')
elif self[tmpSrc].isInstance:
self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
else:
self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
else:
if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
else:
self.OP = NothingToDo("", 'Not update needed')
else:
self.OP = NothingToDo("", "Both are already deleted")
# New in openERP... Create on create_events of synchronize function
elif self.OE.found and not self.GG.found:
if self.OE.status:
self.OP = Delete('OE', 'Update or delete from GOOGLE')
else:
if not modeFull:
self.OP = Delete('GG', 'Deleted from Odoo, need to delete it from Gmail if already created')
else:
self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in Odoo")
elif self.GG.found and not self.OE.found:
tmpSrc = 'GG'
if not self.GG.status and not self.GG.isInstance:
                # nothing to do: the event was created and then deleted before the synchronization
self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
else:
if self.GG.isInstance:
if self[tmpSrc].status:
self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
else:
self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
else:
self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')
def __str__(self):
return self.__repr__()
def __repr__(self):
myPrint = "\n\n---- A SYNC EVENT ---"
myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
myPrint += "\n Update OE: %10s " % (self.OE.update)
myPrint += "\n Update GG: %10s " % (self.GG.update)
myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
if (self.OP is None):
myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
else:
myPrint += "\n Action %s" % type(self.OP).__name__
myPrint += "\n Source %s" % (self.OP.src)
myPrint += "\n comment %s" % (self.OP.info)
return myPrint
class SyncOperation(object):
def __init__(self, src, info, **kw):
self.src = src
self.info = info
for k, v in kw.items():
setattr(self, k, v)
def __str__(self):
return 'in__STR__'
class Create(SyncOperation):
pass
class Update(SyncOperation):
pass
class Delete(SyncOperation):
pass
class NothingToDo(SyncOperation):
pass
class Exclude(SyncOperation):
pass
class google_calendar(osv.AbstractModel):
STR_SERVICE = 'calendar'
_name = 'google.%s' % STR_SERVICE
def generate_data(self, cr, uid, event, isCreating=False, context=None):
if not context:
context = {}
if event.allday:
start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=1), context=context).isoformat('T').split('T')[0]
type = 'date'
vstype = 'dateTime'
else:
start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
type = 'dateTime'
vstype = 'date'
attendee_list = []
for attendee in event.attendee_ids:
email = tools.email_split(attendee.email)
email = email[0] if email else 'NoEmail@mail.com'
attendee_list.append({
'email': email,
'displayName': attendee.partner_id.name,
'responseStatus': attendee.state or 'needsAction',
})
reminders = []
for alarm in event.alarm_ids:
reminders.append({
"method": "email" if alarm.type == "email" else "popup",
"minutes": alarm.duration_minutes
})
data = {
"summary": event.name or '',
"description": event.description or '',
"start": {
type: start_date,
vstype: None,
'timeZone': context.get('tz', 'UTC'),
},
"end": {
type: final_date,
vstype: None,
'timeZone': context.get('tz', 'UTC'),
},
"attendees": attendee_list,
"reminders": {
"overrides": reminders,
"useDefault": "false"
},
"location": event.location or '',
"visibility": event['class'] or 'public',
}
if event.recurrency and event.rrule:
data["recurrence"] = ["RRULE:" + event.rrule]
if not event.active:
data["state"] = "cancelled"
if not self.get_need_synchro_attendee(cr, uid, context=context):
data.pop("attendees")
if isCreating:
other_google_ids = [other_att.google_internal_event_id for other_att in event.attendee_ids if other_att.google_internal_event_id]
if other_google_ids:
data["id"] = other_google_ids[0]
return data
def create_an_event(self, cr, uid, event, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event, isCreating=True, context=context)
url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)
def delete_an_event(self, cr, uid, event_id, context=None):
gs_pool = self.pool['google.service']
params = {
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)
return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)
def get_calendar_primary_id(self, cr, uid, context=None):
params = {
'fields': 'id',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/primary"
try:
st, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except Exception, e:
if (e.code == 401):  # Token invalid / Access unauthorized
error_msg = "Your token is invalid or has been revoked !"
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
raise
return (status_response(st), content['id'] or False, ask_time)
def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
if not token:
token = self.get_token(cr, uid, context)
params = {
'fields': 'items,nextPageToken',
'access_token': token,
'maxResults': 1000,
#'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
}
if lastSync:
params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
params['showDeleted'] = True
else:
params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events" % 'primary'
if nextPageToken:
params['pageToken'] = nextPageToken
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
google_events_dict = {}
for google_event in content['items']:
google_events_dict[google_event['id']] = google_event
if content.get('nextPageToken'):
google_events_dict.update(
self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
)
return google_events_dict
def get_one_event_synchro(self, cr, uid, google_id, context=None):
token = self.get_token(cr, uid, context)
params = {
'access_token': token,
'maxResults': 1000,
'showDeleted': True,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
try:
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except:
_logger.info("Calendar Synchro - In except of get_one_event_synchro")
return False
return status_response(status) and content or False
def update_to_google(self, cr, uid, oe_event, google_event, context):
calendar_event = self.pool['calendar.event']
url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = self.generate_data(cr, uid, oe_event, context=context)
data['sequence'] = google_event.get('sequence', 0)
data_json = simplejson.dumps(data)
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)
update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)
def update_an_event(self, cr, uid, event, context=None):
data = self.generate_data(cr, uid, event, context=context)
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
headers = {}
data['access_token'] = self.get_token(cr, uid, context)
status, response, ask_time = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
#TO_CHECK: if the HTTP request fails (no event found), should we do a DELETE?
return response
def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event_new, context=context)
data['recurringEventId'] = event_ori_google_id
data['originalStartTime'] = event_new.recurrent_id_date
url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json'}
data['sequence'] = self.get_sequence(cr, uid, instance_id, context)
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)
def update_from_google(self, cr, uid, event, single_event_dict, type, context):
if context is None:
context = {}
calendar_event = self.pool['calendar.event']
res_partner_obj = self.pool['res.partner']
calendar_attendee_obj = self.pool['calendar.attendee']
calendar_alarm_obj = self.pool['calendar.alarm']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
attendee_record = []
alarm_record = set()
partner_record = [(4, myPartnerID)]
result = {}
if self.get_need_synchro_attendee(cr, uid, context=context):
for google_attendee in single_event_dict.get('attendees', []):
partner_email = google_attendee.get('email', False)
if type == "write":
for oe_attendee in event['attendee_ids']:
if oe_attendee.email == partner_email:
calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
google_attendee['found'] = True
continue
if google_attendee.get('found'):
continue
attendee_id = res_partner_obj.search(cr, uid, [('email', '=', partner_email)], context=context)
if not attendee_id:
data = {
'email': partner_email,
'customer': False,
'name': google_attendee.get("displayName", False) or partner_email
}
attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
partner_record.append((4, attendee.get('id')))
attendee['partner_id'] = attendee.pop('id')
attendee['state'] = google_attendee['responseStatus']
attendee_record.append((0, 0, attendee))
for google_alarm in single_event_dict.get('reminders', {}).get('overrides', []):
alarm_id = calendar_alarm_obj.search(
cr,
uid,
[
('type', '=', google_alarm['method'] if google_alarm['method'] == 'email' else 'notification'),
('duration_minutes', '=', google_alarm['minutes'])
],
context=context
)
if not alarm_id:
data = {
'type': google_alarm['method'] if google_alarm['method'] == 'email' else 'notification',
'duration': google_alarm['minutes'],
'interval': 'minutes',
'name': "%s minutes - %s" % (google_alarm['minutes'], google_alarm['method'])
}
alarm_id = [calendar_alarm_obj.create(cr, uid, data, context=context)]
alarm_record.add(alarm_id[0])
UTC = pytz.timezone('UTC')
if single_event_dict.get('start') and single_event_dict.get('end'): # If not cancelled
if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
date = parser.parse(single_event_dict['start']['dateTime'])
stop = parser.parse(single_event_dict['end']['dateTime'])
date = str(date.astimezone(UTC))[:-6]
stop = str(stop.astimezone(UTC))[:-6]
allday = False
else:
date = (single_event_dict['start']['date'])
stop = (single_event_dict['end']['date'])
d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
allday = True
d_end = d_end + timedelta(days=-1)
stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)
update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
result.update({
'start': date,
'stop': stop,
'allday': allday
})
result.update({
'attendee_ids': attendee_record,
'partner_ids': list(set(partner_record)),
'alarm_ids': [(6, 0, list(alarm_record))],
'name': single_event_dict.get('summary', 'Event'),
'description': single_event_dict.get('description', False),
'location': single_event_dict.get('location', False),
'class': single_event_dict.get('visibility', 'public'),
'oe_update_date': update_date,
})
if single_event_dict.get("recurrence", False):
rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
result['rrule'] = rrule
context = dict(context or {}, no_mail_to_attendees=True)
if type == "write":
res = calendar_event.write(cr, uid, event['id'], result, context=context)
elif type == "copy":
result['recurrence'] = True
res = calendar_event.write(cr, uid, [event['id']], result, context=context)
elif type == "create":
res = calendar_event.create(cr, uid, result, context=context)
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
return res
def remove_references(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
reset_data = {
'google_calendar_rtoken': False,
'google_calendar_token': False,
'google_calendar_token_validity': False,
'google_calendar_last_sync_date': False,
'google_calendar_cal_id': False,
}
all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
current_user.write(reset_data)
return True
def synchronize_events_cron(self, cr, uid, context=None):
ids = self.pool['res.users'].search(cr, uid, [('google_calendar_last_sync_date', '!=', False)], context=context)
_logger.info("Calendar Synchro - Started by cron")
for user_to_sync in ids:
_logger.info("Calendar Synchro - Starting synchronization for a new user [%s] " % user_to_sync)
try:
resp = self.synchronize_events(cr, user_to_sync, False, lastSync=True, context=None)
if resp.get("status") == "need_reset":
_logger.info("[%s] Calendar Synchro - Failed - NEED RESET !" % user_to_sync)
else:
_logger.info("[%s] Calendar Synchro - Done with status : %s !" % (user_to_sync, resp.get("status")))
except Exception, e:
_logger.info("[%s] Calendar Synchro - Exception : %s !" % (user_to_sync, exception_to_unicode(e)))
_logger.info("Calendar Synchro - Ended by cron")
def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
if context is None:
context = {}
# def isValidSync(syncToken):
# gs_pool = self.pool['google.service']
# params = {
# 'maxResults': 1,
# 'fields': 'id',
# 'access_token': self.get_token(cr, uid, context),
# 'syncToken': syncToken,
# }
# url = "/calendar/v3/calendars/primary/events"
# status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
# return int(status) != 410
user_to_sync = ids and ids[0] or uid
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_to_sync, context=context)
st, current_google, ask_time = self.get_calendar_primary_id(cr, user_to_sync, context=context)
if current_user.google_calendar_cal_id:
if current_google != current_user.google_calendar_cal_id:
return {
"status": "need_reset",
"info": {
"old_name": current_user.google_calendar_cal_id,
"new_name": current_google
},
"url": ''
}
if lastSync and self.get_last_sync_date(cr, user_to_sync, context=context) and not self.get_disable_since_synchro(cr, user_to_sync, context=context):
lastSync = self.get_last_sync_date(cr, user_to_sync, context)
_logger.info("[%s] Calendar Synchro - MODE SINCE_MODIFIED : %s !" % (user_to_sync, lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT)))
else:
lastSync = False
_logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO FORCED" % user_to_sync)
else:
current_user.write({'google_calendar_cal_id': current_google})
lastSync = False
_logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID" % user_to_sync)
new_ids = []
new_ids += self.create_new_events(cr, user_to_sync, context=context)
new_ids += self.bind_recurring_events_to_google(cr, user_to_sync, context)
res = self.update_events(cr, user_to_sync, lastSync, context)
current_user.write({'google_calendar_last_sync_date': ask_time})
return {
"status": res and "need_refresh" or "no_new_event_from_google",
"url": ''
}
def create_new_events(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
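# my attendee records that have no Google id yet, limited to events whose stop date
# (or recurrence end date) is still inside the synchronization window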
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
('google_internal_event_id', '=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
other_google_ids = [other_att.google_internal_event_id for other_att in att.event_id.attendee_ids if other_att.google_internal_event_id and other_att.id != att.id]
for other_google_id in other_google_ids:
if self.get_one_event_synchro(cr, uid, other_google_id, context=context):
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': other_google_id})
break
else:
if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
st, response, ask_time = self.create_an_event(cr, uid, att.event_id, context=context)
if status_response(st):
update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
new_ids.append(response['id'])
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
return new_ids
def get_context_no_virtual(self, context):
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
context_norecurrent['active_test'] = False
return context_norecurrent
def bind_recurring_events_to_google(self, cr, uid, context=None):
if context is None:
context = {}
new_ids = []
ev_obj = self.pool['calendar.event']
att_obj = self.pool['calendar.attendee']
user_obj = self.pool['res.users']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_norecurrent = self.get_context_no_virtual(context)
my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
new_google_internal_event_id = False
source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
if not source_attendee_record_id:
continue
source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]
if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'
if new_google_internal_event_id:
#TODO WARNING: need to check that the event and all its instances have not been deleted in Gmail beforehand!
try:
st, response, ask_time = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
if status_response(st):
att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
new_ids.append(new_google_internal_event_id)
cr.commit()
else:
_logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
_logger.warning("Response : %s" % response)
except:
pass
return new_ids
def update_events(self, cr, uid, lastSync=False, context=None):
context = dict(context or {})
calendar_event = self.pool['calendar.event']
user_obj = self.pool['res.users']
att_obj = self.pool['calendar.attendee']
myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
context_novirtual = self.get_context_no_virtual(context)
if lastSync:
try:
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
except urllib2.HTTPError, e:
if e.code == 410: # GONE, Google is lost.
# force a rollback on this cursor: it holds a lock on my res_users row, which must be written before raising.
cr.rollback()
registry = openerp.modules.registry.RegistryManager.get(request.session.db)
with registry.cursor() as cur:
self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_last_sync_date': False}, context=context)
error_key = simplejson.loads(str(e))
error_key = error_key.get('error', {}).get('message', 'nc')
error_msg = "Google is lost... the next synchro will be a full synchro. \n\n %s" % error_key
raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
my_google_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('google_internal_event_id', 'in', all_event_from_google.keys())
], context=context_novirtual)
my_openerp_att_ids = att_obj.search(cr, uid, [
('partner_id', '=', myPartnerID),
('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('google_internal_event_id', '!=', False),
], context=context_novirtual)
my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)
if self.get_print_log(cr, uid, context=context):
_logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))
for giid in my_openerp_googleinternal_ids:
active = True # if not sure, we request google
if giid.get('event_id'):
active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active
if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
if one_event:
all_event_from_google[one_event['id']] = one_event
my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
else:
domain = [
('partner_id', '=', myPartnerID),
('google_internal_event_id', '!=', False),
'|',
('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
]
# Select all events from OpenERP which have been already synchronized in gmail
my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)
event_to_synchronize = {}
for att in att_obj.browse(cr, uid, my_att_ids, context=context):
event = att.event_id
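# group recurring instances under their parent: Google instance ids look like
# '<base_id>_<YYYYMMDDTHHMMSSZ>', so rsplit on the last '_' gives the base event id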
base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]
ev_to_sync.OE.attendee_id = att.id
ev_to_sync.OE.event = event
ev_to_sync.OE.found = True
ev_to_sync.OE.event_id = event.id
ev_to_sync.OE.isRecurrence = event.recurrency
ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
ev_to_sync.OE.update = event.oe_update_date
ev_to_sync.OE.status = event.active
ev_to_sync.OE.synchro = att.oe_synchro_date
for event in all_event_from_google.values():
event_id = event.get('id')
base_event_id = event_id.rsplit('_', 1)[0]
if base_event_id not in event_to_synchronize:
event_to_synchronize[base_event_id] = {}
if event_id not in event_to_synchronize[base_event_id]:
event_to_synchronize[base_event_id][event_id] = SyncEvent()
ev_to_sync = event_to_synchronize[base_event_id][event_id]
ev_to_sync.GG.event = event
ev_to_sync.GG.found = True
ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
ev_to_sync.GG.update = event.get('updated', None)  # if deleted, the updated date is not available without fetching the event
if ev_to_sync.GG.update:
ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
ev_to_sync.GG.status = (event.get('status') != 'cancelled')
######################
# PRE-PROCESSING #
######################
for base_event in event_to_synchronize:
for current_event in event_to_synchronize[base_event]:
event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
if self.get_print_log(cr, uid, context=context):
if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
_logger.info(event_to_synchronize[base_event])
######################
# DO ACTION #
######################
for base_event in event_to_synchronize:
event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
for current_event in event_to_synchronize[base_event]:
cr.commit()
event = current_event[1]  # event is a SyncEvent!
actToDo = event.OP
actSrc = event.OP.src
context['curr_attendee'] = event.OE.attendee_id
if isinstance(actToDo, NothingToDo):
continue
elif isinstance(actToDo, Create):
context_tmp = context.copy()
context_tmp['NewMeeting'] = True
if actSrc == 'GG':
res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
event.OE.event_id = res
meeting = calendar_event.browse(cr, uid, res, context=context)
attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
elif actSrc == 'OE':
raise Exception("Should never get here: creation for OE is done before update!")
#TODO Add to batch
elif isinstance(actToDo, Update):
if actSrc == 'GG':
self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
elif actSrc == 'OE':
self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
elif isinstance(actToDo, Exclude):
if actSrc == 'OE':
self.delete_an_event(cr, uid, current_event[0], context=context)
elif actSrc == 'GG':
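# rebuild the Odoo "virtual id" suffix from the Google instance id: the part after
# the last '_' is either a date (all-day) or a datetime ending with 'Z'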
new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
if 'T' in new_google_event_id:
new_google_event_id = new_google_event_id.replace('T', '')[:-1]
else:
new_google_event_id = new_google_event_id + "000000"
if event.GG.status:
parent_event = {}
if not event_to_synchronize[base_event][0][1].OE.event_id:
main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]
parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
else:
parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
if parent_oe_id:
calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)
elif isinstance(actToDo, Delete):
if actSrc == 'GG':
try:
self.delete_an_event(cr, uid, current_event[0], context=context)
except Exception, e:
error = simplejson.loads(e.read())
error_nr = error.get('error', {}).get('code')
# if already deleted from gmail or never created
if error_nr in (404, 410,):
pass
else:
raise e
elif actSrc == 'OE':
calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
return True
def check_and_sync(self, cr, uid, oe_event, google_event, context):
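# the most recently modified side wins: push to Google if Odoo is newer, pull from Google if Google is newer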
if datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") > datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_to_google(cr, uid, oe_event, google_event, context)
elif datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") < datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_from_google(cr, uid, oe_event, google_event, 'write', context)
def get_sequence(self, cr, uid, instance_id, context=None):
gs_pool = self.pool['google.service']
params = {
'fields': 'sequence',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
st, content, ask_time = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)
return content.get('sequence', 0)
#################################
## MANAGE CONNECTION TO GMAIL ##
#################################
def get_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
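# refresh the access token if it is missing or expires within the next minute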
if not current_user.google_calendar_token_validity or \
datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
self.do_refresh_token(cr, uid, context=context)
current_user.refresh()
return current_user.google_calendar_token
def get_last_sync_date(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes=0) or False
def do_refresh_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
gs_pool = self.pool['google.service']
all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def need_authorize(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_rtoken is False
def get_calendar_scope(self, RO=False):
readonly = RO and '.readonly' or ''
return 'https://www.googleapis.com/auth/calendar%s' % (readonly)
def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):
url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context)
return url
def can_authorize_google(self, cr, uid, context=None):
return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
def set_all_tokens(self, cr, uid, authorization_code, context=None):
gs_pool = self.pool['google.service']
all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token')
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def get_minTime(self, cr, uid, context=None):
number_of_week = int(self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13))
return datetime.now() - timedelta(weeks=number_of_week)
def get_need_synchro_attendee(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)
def get_disable_since_synchro(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)
def get_print_log(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'google_calendar_rtoken': fields.char('Refresh Token'),
'google_calendar_token': fields.char('User token'),
'google_calendar_token_validity': fields.datetime('Token Validity'),
'google_calendar_last_sync_date': fields.datetime('Last synchro date'),
'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID that has been synchronized. If it is changed, we remove \
all links between the Google ID and the Odoo Google internal ID')
}
class calendar_event(osv.Model):
_inherit = "calendar.event"
def get_fields_need_update_google(self, cr, uid, context=None):
return ['name', 'description', 'allday', 'start', 'date_end', 'stop',
'attendee_ids', 'alarm_ids', 'location', 'class', 'active',
'start_date', 'start_datetime', 'stop_date', 'stop_datetime']
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
sync_fields = set(self.get_fields_need_update_google(cr, uid, context))
if (set(vals.keys()) & sync_fields) and 'oe_update_date' not in vals.keys() and 'NewMeeting' not in context:
vals['oe_update_date'] = datetime.now()
return super(calendar_event, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
if default.get('write_type', False):
del default['write_type']
elif default.get('recurrent_id', False):
default['oe_update_date'] = datetime.now()
else:
default['oe_update_date'] = False
return super(calendar_event, self).copy(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):
return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)
_columns = {
'oe_update_date': fields.datetime('Odoo Update Date'),
}
class calendar_attendee(osv.Model):
_inherit = 'calendar.attendee'
_columns = {
'google_internal_event_id': fields.char('Google Calendar Event Id'),
'oe_synchro_date': fields.datetime('Odoo Synchro Date'),
}
_sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
for id in ids:
ref = vals.get('event_id', self.browse(cr, uid, id, context=context).event_id.id)
# If attendees are updated, we need to flag that the next synchro needs an action,
# except if the update comes from update_from_google
if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):
self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)
return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)
|
dfalt974/SickRage | refs/heads/master | lib/html5lib/_trie/_base.py | 79 | from __future__ import absolute_import, division, unicode_literals
from collections import Mapping
class Trie(Mapping):
"""Abstract base class for tries"""
def keys(self, prefix=None):
# pylint:disable=arguments-differ
keys = super(Trie, self).keys()
if prefix is None:
return set(keys)
return {x for x in keys if x.startswith(prefix)}
def has_keys_with_prefix(self, prefix):
for key in self.keys():
if key.startswith(prefix):
return True
return False
def longest_prefix(self, prefix):
if prefix in self:
return prefix
for i in range(1, len(prefix) + 1):
if prefix[:-i] in self:
return prefix[:-i]
raise KeyError(prefix)
def longest_prefix_item(self, prefix):
lprefix = self.longest_prefix(prefix)
return (lprefix, self[lprefix])
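# A minimal sketch (not part of html5lib) of a dict-backed concrete subclass, assuming
# the Mapping protocol methods below are all that consumers of Trie rely on:
#
#     class DictTrie(Trie):
#         def __init__(self, data):
#             self._data = dict(data)
#         def __getitem__(self, key):
#             return self._data[key]
#         def __iter__(self):
#             return iter(self._data)
#         def __len__(self):
#             return len(self._data)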
|
edisonlz/fruit | refs/heads/master | web_project/base/site-packages/grappelli/__init__.py | 1 | VERSION = '2.3.6' |
ULHPC/modules | refs/heads/devel | easybuild/easybuild-easyblocks/easybuild/easyblocks/m/metavelvet.py | 12 | ##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2015 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing MetaVelvet, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_MetaVelvet(ConfigureMake):
"""
Support for building MetaVelvet
"""
def configure_step(self):
"""
No configure
"""
pass
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
# Get executable files: for i in $(find . -maxdepth 1 -type f -perm +111 -print | sed -e 's/\.\///g' | awk '{print "\""$0"\""}' | grep -vE "\.sh|\.html"); do echo -ne "$i, "; done && echo
try:
os.makedirs(destdir)
for filename in ["meta-velvetg"]:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""Custom sanity check for MetaVelvet."""
custom_paths = {
'files': ['bin/meta-velvetg'],
'dirs': []
}
super(EB_MetaVelvet, self).sanity_check_step(custom_paths=custom_paths)
|
igorg1312/googlepythonsskeleton | refs/heads/master | lib/jinja2/loaders.py | 333 | # -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
an unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
|
ArduPilot/MAVProxy | refs/heads/master | MAVProxy/modules/mavproxy_mode.py | 3 | #!/usr/bin/env python
'''mode command handling'''
import time, os
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
class ModeModule(mp_module.MPModule):
def __init__(self, mpstate):
super(ModeModule, self).__init__(mpstate, "mode", public=True)
self.add_command('mode', self.cmd_mode, "mode change", self.available_modes())
self.add_command('guided', self.cmd_guided, "fly to a clicked location on map")
def cmd_mode(self, args):
'''set arbitrary mode'''
mode_mapping = self.master.mode_mapping()
if mode_mapping is None:
print('No mode mapping available')
return
if len(args) != 1:
print('Available modes: ', mode_mapping.keys())
return
if args[0].isdigit():
modenum = int(args[0])
else:
mode = args[0].upper()
if mode not in mode_mapping:
print('Unknown mode: %s' % mode)
return
modenum = mode_mapping[mode]
self.master.set_mode(modenum)
def available_modes(self):
if self.master is None:
print('No mode mapping available')
return []
mode_mapping = self.master.mode_mapping()
if mode_mapping is None:
print('No mode mapping available')
return []
return mode_mapping.keys()
def unknown_command(self, args):
'''handle mode switch by mode name as command'''
mode_mapping = self.master.mode_mapping()
mode = args[0].upper()
if mode in mode_mapping:
self.master.set_mode(mode_mapping[mode])
return True
return False
def cmd_guided(self, args):
'''set GUIDED target'''
if len(args) != 1 and len(args) != 3:
print("Usage: guided ALTITUDE | guided LAT LON ALTITUDE")
return
if len(args) == 3:
latitude = float(args[0])
longitude = float(args[1])
altitude = float(args[2])
latlon = (latitude, longitude)
else:
latlon = self.mpstate.click_location
if latlon is None:
print("No map click position available")
return
altitude = float(args[0])
print("Guided %s %s" % (str(latlon), str(altitude)))
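# send a MISSION_ITEM_INT with MAV_CMD_NAV_WAYPOINT and current=2 (guided-mode target);
# latitude/longitude are encoded as degrees * 1e7, altitude in the frame returned by get_default_frame()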
self.master.mav.mission_item_int_send(self.settings.target_system,
self.settings.target_component,
0,
self.module('wp').get_default_frame(),
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
2, 0, 0, 0, 0, 0,
int(latlon[0]*1.0e7),
int(latlon[1]*1.0e7),
altitude)
def init(mpstate):
'''initialise module'''
return ModeModule(mpstate)
|
drxos/python-social-auth | refs/heads/master | social/tests/test_utils.py | 73 | import sys
import unittest2 as unittest
from mock import Mock
from social.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, slugify, build_absolute_uri, \
partial_pipeline_data
PY3 = sys.version_info[0] == 3
class SanitizeRedirectTest(unittest.TestCase):
def test_none_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', None), None)
def test_empty_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', ''), None)
def test_dict_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', {}), None)
def test_invalid_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', {'foo': 'bar'}), None)
def test_wrong_path_redirect(self):
self.assertEqual(
sanitize_redirect('myapp.com', 'http://notmyapp.com/path/'),
None
)
def test_valid_absolute_redirect(self):
self.assertEqual(
sanitize_redirect('myapp.com', 'http://myapp.com/path/'),
'http://myapp.com/path/'
)
def test_valid_relative_redirect(self):
self.assertEqual(sanitize_redirect('myapp.com', '/path/'), '/path/')
class UserIsAuthenticatedTest(unittest.TestCase):
def test_user_is_none(self):
self.assertEqual(user_is_authenticated(None), False)
def test_user_is_not_none(self):
self.assertEqual(user_is_authenticated(object()), True)
def test_user_has_is_authenticated(self):
class User(object):
is_authenticated = True
self.assertEqual(user_is_authenticated(User()), True)
def test_user_has_is_authenticated_callable(self):
class User(object):
def is_authenticated(self):
return True
self.assertEqual(user_is_authenticated(User()), True)
class UserIsActiveTest(unittest.TestCase):
def test_user_is_none(self):
self.assertEqual(user_is_active(None), False)
def test_user_is_not_none(self):
self.assertEqual(user_is_active(object()), True)
def test_user_has_is_active(self):
class User(object):
is_active = True
self.assertEqual(user_is_active(User()), True)
def test_user_has_is_active_callable(self):
class User(object):
def is_active(self):
return True
self.assertEqual(user_is_active(User()), True)
class SlugifyTest(unittest.TestCase):
def test_slugify_formats(self):
if PY3:
self.assertEqual(slugify('FooBar'), 'foobar')
self.assertEqual(slugify('Foo Bar'), 'foo-bar')
self.assertEqual(slugify('Foo (Bar)'), 'foo-bar')
else:
self.assertEqual(slugify('FooBar'.decode('utf-8')), 'foobar')
self.assertEqual(slugify('Foo Bar'.decode('utf-8')), 'foo-bar')
self.assertEqual(slugify('Foo (Bar)'.decode('utf-8')), 'foo-bar')
class BuildAbsoluteURITest(unittest.TestCase):
def setUp(self):
self.host = 'http://foobar.com'
def tearDown(self):
self.host = None
def test_path_none(self):
self.assertEqual(build_absolute_uri(self.host), self.host)
def test_path_empty(self):
self.assertEqual(build_absolute_uri(self.host, ''), self.host)
def test_path_http(self):
self.assertEqual(build_absolute_uri(self.host, 'http://barfoo.com'),
'http://barfoo.com')
def test_path_https(self):
self.assertEqual(build_absolute_uri(self.host, 'https://barfoo.com'),
'https://barfoo.com')
def test_host_ends_with_slash_and_path_starts_with_slash(self):
self.assertEqual(build_absolute_uri(self.host + '/', '/foo/bar'),
'http://foobar.com/foo/bar')
def test_absolute_uri(self):
self.assertEqual(build_absolute_uri(self.host, '/foo/bar'),
'http://foobar.com/foo/bar')
class PartialPipelineData(unittest.TestCase):
def test_kwargs_included_in_result(self):
backend = self._backend()
key, val = ('foo', 'bar')
_, xkwargs = partial_pipeline_data(backend, None,
*(), **dict([(key, val)]))
self.assertTrue(key in xkwargs)
self.assertEqual(xkwargs[key], val)
def test_update_user(self):
user = object()
backend = self._backend(session_kwargs={'user': None})
_, xkwargs = partial_pipeline_data(backend, user)
self.assertTrue('user' in xkwargs)
self.assertEqual(xkwargs['user'], user)
def _backend(self, session_kwargs=None):
strategy = Mock()
strategy.request = None
strategy.session_get.return_value = object()
strategy.partial_from_session.return_value = \
(0, 'mock-backend', [], session_kwargs or {})
backend = Mock()
backend.name = 'mock-backend'
backend.strategy = strategy
return backend
|
kstrauser/ansible | refs/heads/devel | lib/ansible/plugins/shell/__init__.py | 7690 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
MungoRae/home-assistant | refs/heads/dev | homeassistant/components/remote/itach.py | 3 | """
Support for iTach IR Devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/remote.itach/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.components.remote as remote
from homeassistant.const import (
DEVICE_DEFAULT_NAME, CONF_NAME, CONF_MAC, CONF_HOST, CONF_PORT,
CONF_DEVICES)
from homeassistant.components.remote import PLATFORM_SCHEMA
REQUIREMENTS = ['pyitachip2ir==0.0.6']
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 4998
CONNECT_TIMEOUT = 5000
CONF_MODADDR = 'modaddr'
CONF_CONNADDR = 'connaddr'
CONF_COMMANDS = 'commands'
CONF_DATA = 'data'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MAC): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_MODADDR): vol.Coerce(int),
vol.Required(CONF_CONNADDR): vol.Coerce(int),
vol.Required(CONF_COMMANDS): vol.All(cv.ensure_list, [{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_DATA): cv.string
}])
}])
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the ITach connection and devices."""
import pyitachip2ir
itachip2ir = pyitachip2ir.ITachIP2IR(
config.get(CONF_MAC), config.get(CONF_HOST),
int(config.get(CONF_PORT)))
if not itachip2ir.ready(CONNECT_TIMEOUT):
_LOGGER.error("Unable to find iTach")
return False
devices = []
for data in config.get(CONF_DEVICES):
name = data.get(CONF_NAME)
modaddr = int(data.get(CONF_MODADDR, 1))
connaddr = int(data.get(CONF_CONNADDR, 1))
cmddatas = ""
for cmd in data.get(CONF_COMMANDS):
cmdname = cmd[CONF_NAME].strip()
if not cmdname:
cmdname = '""'
cmddata = cmd[CONF_DATA].strip()
if not cmddata:
cmddata = '""'
cmddatas += "{}\n{}\n".format(cmdname, cmddata)
itachip2ir.addDevice(name, modaddr, connaddr, cmddatas)
devices.append(ITachIP2IRRemote(itachip2ir, name))
add_devices(devices, True)
return True
class ITachIP2IRRemote(remote.RemoteDevice):
"""Device that sends commands to an ITachIP2IR device."""
def __init__(self, itachip2ir, name):
"""Initialize device."""
self.itachip2ir = itachip2ir
self._power = False
self._name = name or DEVICE_DEFAULT_NAME
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._power
def turn_on(self, **kwargs):
"""Turn the device on."""
self._power = True
self.itachip2ir.send(self._name, "ON", 1)
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._power = False
self.itachip2ir.send(self._name, "OFF", 1)
self.schedule_update_ha_state()
def send_command(self, command, **kwargs):
"""Send a command to one device."""
for single_command in command:
self.itachip2ir.send(self._name, single_command, 1)
def update(self):
"""Update the device."""
self.itachip2ir.update()
|
Yichuans/ccv | refs/heads/master | languages/hu.py | 162 | # coding: utf8
{
'!langcode!': 'hu',
'!langname!': 'Magyar',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s sorok törlődtek',
'%s %%{row} updated': '%s sorok frissítődtek',
'%s selected': '%s kiválasztott',
'%Y-%m-%d': '%Y.%m.%d.',
'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'az adminisztrációs felületért kattints ide',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Elérhető adatbázisok és táblák',
'Buy this book': 'Buy this book',
'cache': 'gyorsítótár',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nem lehet üres',
'change password': 'jelszó megváltoztatása',
'Check to delete': 'Törléshez válaszd ki',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Jelenlegi lekérdezés',
'Current response': 'Jelenlegi válasz',
'Current session': 'Jelenlegi folyamat',
'customize me!': 'változtass meg!',
'data uploaded': 'adat feltöltve',
'Database': 'adatbázis',
'Database %s select': 'adatbázis %s kiválasztás',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Töröl:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'kész!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Szerkeszt',
'Edit current record': 'Aktuális bejegyzés szerkesztése',
'edit profile': 'profil szerkesztése',
'Edit This App': 'Alkalmazást szerkeszt',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportál csv fájlba',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Hello Világ',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'új beillesztése',
'insert new %s': 'új beillesztése %s',
'Internal State': 'Internal State',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Hibás lekérdezés',
'invalid request': 'hibás kérés',
'Key': 'Key',
'Last name': 'Last name',
'Layout': 'Szerkezet',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'belép',
'logout': 'kilép',
'lost password': 'elveszett jelszó',
'Lost Password': 'Lost Password',
'Main Menu': 'Főmenü',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menü model',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Új bejegyzés',
'new record inserted': 'új bejegyzés felvéve',
'next 100 rows': 'következő 100 sor',
'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',
'Online examples': 'online példákért kattints ide',
'or import from csv file': 'vagy betöltés csv fájlból',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'előző 100 sor',
'Python': 'Python',
'Query:': 'Lekérdezés:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'bejegyzés',
'record does not exist': 'bejegyzés nem létezik',
'Record ID': 'Record ID',
'Record id': 'bejegyzés id',
'Register': 'Register',
'register': 'regisztráció',
'Registration key': 'Registration key',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Rows in Table': 'Sorok a táblában',
'Rows selected': 'Kiválasztott sorok',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'állapot',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',
'Table': 'tábla',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',
'Update:': 'Frissít:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User ID': 'User ID',
'Videos': 'Videos',
'View': 'Nézet',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Isten hozott a web2py-ban',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
|
sjerdo/letsencrypt | refs/heads/master | letsencrypt/plugins/webroot_test.py | 2 | """Tests for letsencrypt.plugins.webroot."""
import os
import shutil
import tempfile
import unittest
import mock
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt.tests import acme_util
from letsencrypt.tests import test_util
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
class AuthenticatorTest(unittest.TestCase):
"""Tests for letsencrypt.plugins.webroot.Authenticator."""
achall = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.HTTP01_P, domain=None, account_key=KEY)
def setUp(self):
from letsencrypt.plugins.webroot import Authenticator
self.path = tempfile.mkdtemp()
self.validation_path = os.path.join(
self.path, ".well-known", "acme-challenge",
"ZXZhR3hmQURzNnBTUmIyTEF2OUlaZjE3RHQzanV4R0orUEN0OTJ3citvQQ")
self.config = mock.MagicMock(webroot_path=self.path)
self.auth = Authenticator(self.config, "webroot")
self.auth.prepare()
def tearDown(self):
shutil.rmtree(self.path)
def test_more_info(self):
more_info = self.auth.more_info()
self.assertTrue(isinstance(more_info, str))
self.assertTrue(self.path in more_info)
def test_add_parser_arguments(self):
add = mock.MagicMock()
self.auth.add_parser_arguments(add)
self.assertEqual(1, add.call_count)
def test_prepare_bad_root(self):
self.config.webroot_path = os.path.join(self.path, "null")
self.assertRaises(errors.PluginError, self.auth.prepare)
def test_prepare_missing_root(self):
self.config.webroot_path = None
self.assertRaises(errors.PluginError, self.auth.prepare)
def test_prepare_full_root_exists(self):
# prepare() has already been called once in setUp()
self.auth.prepare() # shouldn't raise any exceptions
def test_prepare_reraises_other_errors(self):
self.auth.full_path = os.path.join(self.path, "null")
os.chmod(self.path, 0o000)
self.assertRaises(errors.PluginError, self.auth.prepare)
os.chmod(self.path, 0o700)
def test_perform_cleanup(self):
responses = self.auth.perform([self.achall])
self.assertEqual(1, len(responses))
self.assertTrue(os.path.exists(self.validation_path))
with open(self.validation_path) as validation_f:
validation = validation_f.read()
self.assertTrue(
challenges.KeyAuthorizationChallengeResponse(
key_authorization=validation).verify(
self.achall.chall, KEY.public_key()))
self.auth.cleanup([self.achall])
self.assertFalse(os.path.exists(self.validation_path))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
ddepaoli3/magnum | refs/heads/master | magnum/__init__.py | 19 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import pbr.version
__version__ = pbr.version.VersionInfo(
'magnum').version_string()
# Make a project global TLS trace storage repository
TLS = threading.local()
|
chvalean/lis-test | refs/heads/master | WS2012R2/lisa/tools/middleware_bench/utils/setup.py | 7 | """
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import sys
import time
import logging
from utils import constants
from utils.cmdshell import SSHClient
from report.db_utils import upload_results
from paramiko.ssh_exception import NoValidConnectionsError
from providers.amazon_service import AWSConnector
from providers.azure_service import AzureConnector
from providers.gcp_service import GCPConnector
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
class SetupTestEnv:
"""
Setup test environment.
"""
def __init__(self, provider=None, vm_count=None, test_type=None, disk_size=None, raid=None,
keyid=None, secret=None, token=None, subscriptionid=None, tenantid=None,
projectid=None, imageid=None, instancetype=None, user=None, localpath=None,
region=None, zone=None, sriov=False, kernel=None):
"""
Init AWS connector to create and configure AWS ec2 instances.
:param provider Service provider to be used e.g. azure, aws, gce.
:param vm_count: Number of VMs to prepare
:param test_type: vm_disk > 1 VM with disk (Orion and Sysbench)
no_disk > No disk attached (Redis, Memcached, Apache_bench)
db_disk > Second VM with disk (MariaDB, MongoDB)
cluster_disk > All VMs have disks (Terasort)
:param disk_size:
:param raid: Bool or Int (the number of disks), to specify if a RAID will be configured
:param keyid: user key for executing remote connection
:param secret: user secret for executing remote connection
:param token: GCE refresh token obtained with gcloud sdk
:param subscriptionid: Azure specific subscription id
:param tenantid: Azure specific tenant id
:param projectid: GCE specific project id
:param imageid: AWS OS AMI image id or
Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS' or
GCE image family, e.g. 'ubuntu-1604-lts'
:param instancetype: AWS instance resource type e.g 'd2.4xlarge' or
Azure hardware profile vm size e.g. 'Standard_DS14_v2' or
GCE instance size e.g. 'n1-highmem-16'
:param user: remote ssh user for the instance
:param localpath: localpath where the logs should be downloaded, and the
default path for other necessary tools
:param region: region to connect to
:param zone: zone where other resources should be available
:param sriov: bool for configuring SR-IOV or not
:param kernel: kernel deb name to install provided in localpath
:rtype Tuple
:return: connector <Connector>,
vm_ips <VM private IPs dict>,
device <attached disk devices>,
ssh_client <ssh clients dict>
"""
self.provider = provider
self.vm_count = vm_count
self.test_type = test_type
self.disk_size = disk_size
self.raid = raid
self.keyid = keyid
self.secret = secret
self.token = token
self.subscriptionid = subscriptionid
self.tenantid = tenantid
self.projectid = projectid
self.imageid = imageid
self.instancetype = instancetype
self.user = user
self.localpath = localpath
self.region = region
self.zone = zone
self.sriov = sriov
self.kernel = kernel
# create and generate setup details
try:
self.connector = self.create_connector()
self.vms = self.create_instances()
self.device = self.get_disk_devices()
self.ssh_client, self.vm_ips = self.get_instance_details()
self.perf_tuning()
self.reconnect_sshclient()
except Exception as e:
log.exception(e)
if self.connector:
self.connector.teardown()
raise
def create_connector(self):
"""
Create connector by provider.
:return: connector
"""
connector = None
if self.provider == constants.AWS:
connector = AWSConnector(keyid=self.keyid, secret=self.secret, imageid=self.imageid,
instancetype=self.instancetype, user=self.user,
localpath=self.localpath, region=self.region, zone=self.zone)
elif self.provider == constants.AZURE:
connector = AzureConnector(clientid=self.keyid, secret=self.secret,
subscriptionid=self.subscriptionid, tenantid=self.tenantid,
imageid=self.imageid, instancetype=self.instancetype,
user=self.user, localpath=self.localpath,
location=self.region, sriov=self.sriov)
elif self.provider == constants.GCE:
connector = GCPConnector(clientid=self.keyid, secret=self.secret, token=self.token,
projectid=self.projectid, imageid=self.imageid,
instancetype=self.instancetype, user=self.user,
localpath=self.localpath, zone=self.zone)
if connector:
connector.connect()
return connector
else:
raise Exception('Unsupported provider or connector failed.')
def create_instances(self):
"""
Create instances.
:return: VM instances
"""
open(self.connector.host_key_file, 'w').close()
vms = {}
for i in xrange(1, self.vm_count + 1):
vms[i] = self.connector.create_vm()
return vms
def reconnect_sshclient(self):
if self.provider == constants.AWS:
log.info('The provider is AWS, reconnect sshclient')
for i in xrange(1, self.vm_count + 1):
self.ssh_client[i].connect()
def get_instance_details(self):
"""
Create ssh client and get vm IPs
:return: ssh_client, vm_ips
"""
ssh_client = {}
vm_ips = {}
for i in xrange(1, self.vm_count + 1):
if self.provider == constants.AWS:
ssh_client[i] = self.connector.wait_for_ping(self.vms[i])
# SRIOV is enabled by default on AWS for the tested platforms
# if sriov == constants.ENABLED:
# ssh_client[i] = connector.enable_sr_iov(vms[i], ssh_client[i])
self.vms[i].update()
vm_ips[i] = self.vms[i].private_ip_address
elif self.provider == constants.AZURE:
ssh_client[i] = SSHClient(server=self.vms[i].name + self.connector.dns_suffix,
host_key_file=self.connector.host_key_file,
user=self.connector.user,
ssh_key_file=os.path.join(
self.connector.localpath,
self.connector.key_name + '.pem'))
ip = ssh_client[i].run(
'ifconfig eth0 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1')
vm_ips[i] = ip[1].strip()
elif self.provider == constants.GCE:
ssh_client[i] = self.connector.wait_for_ping(self.vms[i])
vm_ips[i] = self.vms[i]['networkInterfaces'][0]['networkIP']
return ssh_client, vm_ips
def attach_raid_disks(self, vm_tag, disk_args):
device = []
for i in xrange(self.raid):
if self.provider == constants.AWS:
disk_args['device'] = '/dev/sd{}'.format(chr(120 - i))
device.append(disk_args['device'].replace('sd', 'xvd'))
elif self.provider == constants.AZURE:
disk_args['device'] = i
device.append('/dev/sd{}'.format(chr(99 + i)))
elif self.provider == constants.GCE:
device.append('/dev/sd{}'.format(chr(98 + i)))
self.connector.attach_disk(self.vms[vm_tag], disk_size=self.disk_size, **disk_args)
return device
def get_disk_devices(self):
if not self.test_type:
return None
device = None
disk_args = {}
if self.provider == constants.AWS:
device = constants.DEVICE_AWS.replace('sd', 'xvd')
disk_args['iops'] = 5000
disk_args['volume_type'] = self.connector.volume_type['ssd_io1']
disk_args['device'] = constants.DEVICE_AWS
elif self.provider == constants.AZURE:
device = constants.DEVICE_AZURE
elif self.provider == constants.GCE:
            # Note: using disk device order prediction, GCE API is not consistent in the disk naming
# device = constants.DEVICE_GCE + disk_name
device = constants.TEMP_DEVICE_GCE
if self.test_type == constants.CLUSTER_DISK:
self.connector.attach_disk(self.vms[1], disk_size=self.disk_size + 200, **disk_args)
for i in xrange(2, self.vm_count + 1):
self.connector.attach_disk(self.vms[i], disk_size=self.disk_size, **disk_args)
time.sleep(3)
return device
vm_tag = None
if self.test_type == constants.VM_DISK:
vm_tag = 1
elif self.test_type == constants.DB_DISK:
vm_tag = 2
if self.raid and type(self.raid) is int:
return self.attach_raid_disks(vm_tag, disk_args)
else:
self.connector.attach_disk(self.vms[vm_tag], disk_size=self.disk_size, **disk_args)
return device
def perf_tuning(self):
current_path = os.path.dirname(sys.modules['__main__'].__file__)
for i in range(1, self.vm_count + 1):
log.info('Running perf tuning on {}'.format(self.vm_ips[i]))
self.ssh_client[i].connect()
self.ssh_client[i].put_file(os.path.join(current_path, 'tests', 'perf_tuning.sh'),
'/tmp/perf_tuning.sh')
self.ssh_client[i].run('chmod +x /tmp/perf_tuning.sh')
self.ssh_client[i].run("sed -i 's/\r//' /tmp/perf_tuning.sh")
params = [self.provider]
if '.deb' in self.kernel:
log.info('Uploading kernel {} on {}'.format(self.kernel, self.vm_ips[i]))
self.ssh_client[i].put_file(os.path.join(self.localpath, self.kernel),
'/tmp/{}'.format(self.kernel))
params.append('/tmp/{}'.format(self.kernel))
self.ssh_client[i].run('/tmp/perf_tuning.sh {}'.format(' '.join(params)))
if self.provider in [constants.AWS, constants.GCE]:
self.ssh_client[i] = self.connector.restart_vm(self.vms[i])
elif self.provider == constants.AZURE:
self.vms[i] = self.connector.restart_vm(self.vms[i].name)
# TODO add custom kernel support for all providers - only azure support
self.ssh_client[i] = SSHClient(server=self.vms[i].name + self.connector.dns_suffix,
host_key_file=self.connector.host_key_file,
user=self.connector.user,
ssh_key_file=os.path.join(
self.connector.localpath,
self.connector.key_name + '.pem'))
ip = self.ssh_client[i].run(
'ifconfig eth0 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1')
self.vm_ips[i] = ip[1].strip()
def run_test(self, ssh_vm_conf=0, testname=None, test_cmd=None, results_path=None, raid=False,
ssh_raid=1, timeout=constants.TIMEOUT):
try:
if all(client is not None for client in self.ssh_client.values()):
current_path = os.path.dirname(sys.modules['__main__'].__file__)
# enable key auth between instances
for i in xrange(1, ssh_vm_conf + 1):
self.ssh_client[i].put_file(os.path.join(self.localpath,
self.connector.key_name + '.pem'),
'/home/{}/.ssh/id_rsa'.format(self.user))
self.ssh_client[i].run('chmod 0600 /home/{0}/.ssh/id_rsa'.format(self.user))
if raid:
self.ssh_client[ssh_raid].put_file(os.path.join(
current_path, 'tests', 'raid.sh'), '/tmp/raid.sh')
self.ssh_client[ssh_raid].run('chmod +x /tmp/raid.sh')
self.ssh_client[ssh_raid].run("sed -i 's/\r//' /tmp/raid.sh")
self.ssh_client[ssh_raid].run('/tmp/raid.sh 0 {} {}'.format(raid, ' '.join(
self.device)))
bash_testname = 'run_{}.sh'.format(testname)
self.ssh_client[1].put_file(os.path.join(current_path, 'tests', bash_testname),
'/tmp/{}'.format(bash_testname))
self.ssh_client[1].run('chmod +x /tmp/{}'.format(bash_testname))
self.ssh_client[1].run("sed -i 's/\r//' /tmp/{}".format(bash_testname))
log.info('Starting background command {}'.format(test_cmd))
channel = self.ssh_client[1].run_pty(test_cmd)
_, pid, _ = self.ssh_client[1].run(
"ps aux | grep -v grep | grep {} | awk '{{print $2}}'".format(
bash_testname))
self._wait_for_pid(self.ssh_client[1], bash_testname, pid, timeout=timeout)
channel.close()
self.ssh_client[1].get_file('/tmp/{}.zip'.format(testname), results_path)
except Exception as e:
log.exception(e)
raise
finally:
if self.connector:
self.connector.teardown()
@staticmethod
def _wait_for_pid(ssh_client, bash_testname, pid, timeout=constants.TIMEOUT):
t = 0
while t < timeout:
try:
_, new_pid, _ = ssh_client.run(
"ps aux | grep -v grep | grep {} | awk '{{print $2}}'".format(
bash_testname))
if new_pid != pid:
return
except NoValidConnectionsError:
log.debug('NoValidConnectionsError, will retry in 60 seconds')
time.sleep(60)
t += 60
time.sleep(60)
t += 60
else:
            raise Exception('Timeout of {} seconds waiting for process to end.'.format(timeout))
def run_test_nohup(self, ssh_vm_conf=0, test_cmd=None, timeout=constants.TIMEOUT, track=None):
try:
if all(client is not None for client in self.ssh_client.values()):
current_path = os.path.dirname(sys.modules['__main__'].__file__)
# enable key auth between instances
for i in xrange(1, ssh_vm_conf + 1):
self.ssh_client[i].put_file(os.path.join(self.localpath,
self.connector.key_name + '.pem'),
'/home/{}/.ssh/id_rsa'.format(self.user))
self.ssh_client[i].run('chmod 0600 /home/{0}/.ssh/id_rsa'.format(self.user))
log.info('Starting run nohup command {}'.format(test_cmd))
self.ssh_client[1].run(test_cmd)
self._wait_for_command(self.ssh_client[1], track, timeout=timeout)
except Exception as e:
log.exception(e)
raise
finally:
log.info('Finish to run nohup command {}'.format(test_cmd))
@staticmethod
def _wait_for_command(ssh_client, track, timeout=constants.TIMEOUT):
t = 0
while t < timeout:
try:
_, p_count, _ = ssh_client.run(
"ps aux | grep -v grep | grep {} | awk '{{print $2}}' | wc -l".format(
track))
                if int(p_count) == 0:
return
except NoValidConnectionsError:
log.debug('NoValidConnectionsError, will retry in 60 seconds')
time.sleep(60)
t += 60
time.sleep(60)
t += 60
else:
            raise Exception('Timeout of {} seconds waiting for process to end.'.format(timeout)) |
jalavik/inspire-next | refs/heads/master | inspire/testsuite/test_export.py | 2 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import pkg_resources
import os
from dojson.contrib.marc21.utils import create_record
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
from inspire.dojson.hep import hep
from invenio.base.wrappers import lazy_import
Export = lazy_import('inspire.utils.export.Export')
class ExportTests(InvenioTestCase):
def setUp(self):
self.marcxml = pkg_resources.resource_string('inspire.testsuite',
os.path.join(
'fixtures',
'test_hep_formats.xml')
)
record = create_record(self.marcxml)
self.hep_record = hep.do(record)
self.export = Export(self.hep_record)
self.sample_export_good = {
'citation_key': 'Aad:2015wqa',
'doi': '10.1140/epjc/s10052-015-3661-9, 10.1140/epjc/s10052-015-3518-2',
'arxiv_field': {u'categories': [u'hep-ex'], u'value': u'arXiv:1503.03290'},
'arxiv': 'arXiv:1503.03290 [hep-ex]',
'reportNumber': 'CERN-PH-EP-2015-038',
'SLACcitation': '%%CITATION = ARXIV:1503.03290;%%',
}
def test_citation_key(self):
"""Test if citation key is created correctly"""
self.assertEqual(self.sample_export_good['citation_key'],
self.export._get_citation_key())
def test_doi(self):
"""Test if doi is created correctly"""
self.assertEqual(self.sample_export_good['doi'],
self.export._get_doi())
def test_arxiv_field(self):
"""Test if arxiv_field is created correctly"""
self.assertEqual(self.sample_export_good['arxiv_field'],
self.export.arxiv_field)
def test_arxiv(self):
"""Test if arxiv is created correctly"""
self.assertEqual(self.sample_export_good['arxiv'],
self.export._get_arxiv())
def test_report_number(self):
"""Test if report number is created correctly"""
self.assertEqual(self.sample_export_good['reportNumber'],
self.export._get_report_number())
def test_slac_citations(self):
"""Test if slac citation is created correctly"""
self.assertEqual(self.sample_export_good['SLACcitation'],
self.export._get_slac_citation())
TEST_SUITE = make_test_suite(ExportTests)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
p0psicles/SickRage | refs/heads/master | lib/feedparser/util.py | 36 | # Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
import warnings
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
'''
:return: A :class:`FeedParserDict`.
'''
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']=='enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']=='license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
'''
:return: A :class:`FeedParserDict`.
'''
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
return id(self)
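# Illustrative behaviour sketch (not part of the original module): the keymap
# above makes legacy keys transparent aliases of the current ones, e.g.
#     d = FeedParserDict()
#     d['modified'] = 'right now'   # stored under 'updated' via __setitem__
#     d['updated']                  # -> 'right now'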
|
VielSoft/odoo | refs/heads/8.0 | addons/l10n_ar/__openerp__.py | 260 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Argentina Localization Chart Account',
'version': '1.0',
'description': """
Argentinian accounting chart and tax localization.
==================================================
Plan contable argentino e impuestos de acuerdo a disposiciones vigentes
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data':[
'account_tax_code.xml',
'l10n_ar_chart.xml',
'account_tax.xml',
'l10n_ar_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agiliq/django | refs/heads/master | django/conf/locale/fr/formats.py | 116 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
sonium0/pymatgen | refs/heads/master | pymatgen/alchemy/materials.py | 1 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This module provides various representations of transformed structures. A
TransformedStructure is a structure that has been modified by undergoing a
series of transformations.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 2, 2012"
import os
import re
import json
import datetime
from copy import deepcopy
from monty.json import MontyDecoder
from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.matproj.snl import StructureNL
from warnings import warn
dec = MontyDecoder()
class TransformedStructure(PMGSONable):
"""
Container object for new structures that include history of
transformations.
Each transformed structure is made up of a sequence of structures with
associated transformation history.
"""
def __init__(self, structure, transformations=None, history=None,
other_parameters=None):
"""
Initializes a transformed structure from a structure.
Args:
structure (Structure): Input structure
transformations ([Transformations]): List of transformations to
apply.
history (list): Previous history.
other_parameters (dict): Additional parameters to be added.
"""
self.final_structure = structure
self.history = history or []
self.other_parameters = other_parameters or {}
self._undone = []
transformations = transformations or []
for t in transformations:
self.append_transformation(t)
def undo_last_change(self):
"""
Undo the last change in the TransformedStructure.
Raises:
IndexError: If already at the oldest change.
"""
if len(self.history) == 0:
raise IndexError("Can't undo. Already at oldest change.")
if 'input_structure' not in self.history[-1]:
raise IndexError("Can't undo. Latest history has no "
"input_structure")
h = self.history.pop()
self._undone.append((h, self.final_structure))
s = h["input_structure"]
if isinstance(s, dict):
s = Structure.from_dict(s)
self.final_structure = s
def redo_next_change(self):
"""
Redo the last undone change in the TransformedStructure.
Raises:
IndexError: If already at the latest change.
"""
if len(self._undone) == 0:
raise IndexError("Can't redo. Already at latest change.")
h, s = self._undone.pop()
self.history.append(h)
self.final_structure = s
def __getattr__(self, name):
s = object.__getattribute__(self, 'final_structure')
return getattr(s, name)
def __len__(self):
return len(self.history)
def append_transformation(self, transformation, return_alternatives=False,
clear_redo=True):
"""
Appends a transformation to the TransformedStructure.
Args:
transformation: Transformation to append
return_alternatives: Whether to return alternative
TransformedStructures for one-to-many transformations.
return_alternatives can be a number, which stipulates the
total number of structures to return.
clear_redo: Boolean indicating whether to clear the redo list.
By default, this is True, meaning any appends clears the
history of undoing. However, when using append_transformation
to do a redo, the redo list should not be cleared to allow
multiple redos.
"""
if clear_redo:
self._undone = []
if return_alternatives and transformation.is_one_to_many:
ranked_list = transformation.apply_transformation(
self.final_structure, return_ranked_list=return_alternatives)
input_structure = self.final_structure.as_dict()
alts = []
for x in ranked_list[1:]:
s = x.pop("structure")
actual_transformation = x.pop("transformation", transformation)
hdict = actual_transformation.as_dict()
hdict["input_structure"] = input_structure
hdict["output_parameters"] = x
self.final_structure = s
d = self.as_dict()
d['history'].append(hdict)
d['final_structure'] = s.as_dict()
alts.append(TransformedStructure.from_dict(d))
x = ranked_list[0]
s = x.pop("structure")
actual_transformation = x.pop("transformation", transformation)
hdict = actual_transformation.as_dict()
hdict["input_structure"] = self.final_structure.as_dict()
hdict["output_parameters"] = x
self.history.append(hdict)
self.final_structure = s
return alts
else:
s = transformation.apply_transformation(self.final_structure)
hdict = transformation.as_dict()
hdict["input_structure"] = self.final_structure.as_dict()
hdict["output_parameters"] = {}
self.history.append(hdict)
self.final_structure = s
def append_filter(self, structure_filter):
"""
Adds a filter.
Args:
            structure_filter (StructureFilter): A filter implementing the
                AbstractStructureFilter API. Tells transmuter what structures
to retain.
"""
hdict = structure_filter.as_dict()
hdict["input_structure"] = self.final_structure.as_dict()
self.history.append(hdict)
def extend_transformations(self, transformations,
return_alternatives=False):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
return_alternatives: Whether to return alternative
TransformedStructures for one-to-many transformations.
return_alternatives can be a number, which stipulates the
total number of structures to return.
"""
for t in transformations:
self.append_transformation(t,
return_alternatives=return_alternatives)
def get_vasp_input(self, vasp_input_set, generate_potcar=True):
"""
Returns VASP input as a dict of vasp objects.
Args:
vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set
to create vasp input files from structures
generate_potcar (bool): Set to False to generate a POTCAR.spec
file instead of a POTCAR, which contains the POTCAR labels
but not the actual POTCAR. Defaults to True.
"""
d = vasp_input_set.get_all_vasp_input(self.final_structure,
generate_potcar)
d["transformations.json"] = json.dumps(self.as_dict())
return d
def write_vasp_input(self, vasp_input_set, output_dir,
create_directory=True):
"""
Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir:
Directory to output files
create_directory:
Create the directory if not present. Defaults to True.
"""
vasp_input_set.write_input(self.final_structure, output_dir,
make_dir_if_not_present=create_directory)
with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
json.dump(self.as_dict(), fp)
def __str__(self):
output = ["Current structure", "------------",
str(self.final_structure),
"\nHistory",
"------------"]
for h in self.history:
h.pop('input_structure', None)
output.append(str(h))
output.append("\nOther parameters")
output.append("------------")
output.append(str(self.other_parameters))
return "\n".join(output)
def set_parameter(self, key, value):
self.other_parameters[key] = value
@property
def was_modified(self):
"""
Boolean describing whether the last transformation on the structure
made any alterations to it one example of when this would return false
is in the case of performing a substitution transformation on the
structure when the specie to replace isn't in the structure.
"""
return not self.final_structure == self.structures[-2]
@property
def structures(self):
"""
Copy of all structures in the TransformedStructure. A
structure is stored after every single transformation.
"""
hstructs = [Structure.from_dict(s['input_structure'])
for s in self.history if 'input_structure' in s]
return hstructs + [self.final_structure]
@staticmethod
def from_cif_string(cif_string, transformations=None, primitive=True,
occupancy_tolerance=1.):
"""
Generates TransformedStructure from a cif string.
Args:
cif_string (str): Input cif string. Should contain only one
structure. For cifs containing multiple structures, please use
CifTransmuter.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
primitive (bool): Option to set if the primitive cell should be
extracted. Defaults to True. However, there are certain
instances where you might want to use a non-primitive cell,
e.g., if you are trying to generate all possible orderings of partial removals
or order a disordered structure.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
TransformedStructure
"""
parser = CifParser.from_string(cif_string, occupancy_tolerance)
raw_string = re.sub("'", "\"", cif_string)
cif_dict = parser.as_dict()
cif_keys = list(cif_dict.keys())
s = parser.get_structures(primitive)[0]
partial_cif = cif_dict[cif_keys[0]]
if "_database_code_ICSD" in partial_cif:
source = partial_cif["_database_code_ICSD"] + "-ICSD"
else:
source = "uploaded cif"
source_info = {"source": source,
"datetime": str(datetime.datetime.now()),
"original_file": raw_string,
"cif_data": cif_dict[cif_keys[0]]}
return TransformedStructure(s, transformations, history=[source_info])
@staticmethod
def from_poscar_string(poscar_string, transformations=None):
"""
Generates TransformedStructure from a poscar string.
Args:
poscar_string (str): Input POSCAR string.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
"""
p = Poscar.from_string(poscar_string)
if not p.true_names:
raise ValueError("Transformation can be craeted only from POSCAR "
"strings with proper VASP5 element symbols.")
raw_string = re.sub("'", "\"", poscar_string)
s = p.structure
source_info = {"source": "POSCAR",
"datetime": str(datetime.datetime.now()),
"original_file": raw_string}
return TransformedStructure(s, transformations, history=[source_info])
def as_dict(self):
"""
Dict representation of the TransformedStructure.
"""
d = self.final_structure.as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["history"] = deepcopy(self.history)
d["version"] = __version__
d["last_modified"] = str(datetime.datetime.utcnow())
d["other_parameters"] = deepcopy(self.other_parameters)
return d
@classmethod
def from_dict(cls, d):
"""
Creates a TransformedStructure from a dict.
"""
s = Structure.from_dict(d)
return cls(s, history=d["history"],
other_parameters=d.get("other_parameters", None))
def to_snl(self, authors, projects=None, references='', remarks=None,
data=None, created_at=None):
if self.other_parameters:
warn('Data in TransformedStructure.other_parameters discarded '
'during type conversion to SNL')
hist = []
for h in self.history:
snl_metadata = h.pop('_snl', {})
hist.append({'name' : snl_metadata.pop('name', 'pymatgen'),
'url' : snl_metadata.pop('url',
'http://pypi.python.org/pypi/pymatgen'),
'description' : h})
return StructureNL(self.final_structure, authors, projects, references,
remarks, data, hist, created_at)
@classmethod
def from_snl(cls, snl):
"""
Create TransformedStructure from SNL.
Args:
snl (StructureNL): Starting snl
Returns:
TransformedStructure
"""
hist = []
for h in snl.history:
d = h.description
d['_snl'] = {'url' : h.url, 'name' : h.name}
hist.append(d)
return cls(snl.structure, history=hist)
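# Minimal usage sketch (assumes a pymatgen Structure `struct` and a
# transformation object `trans` are available; illustrative only, not part of
# the original module):
#     ts = TransformedStructure(struct, transformations=[trans])
#     ts.undo_last_change()   # roll back to the structure before `trans`
#     ts.redo_next_change()   # re-apply the undone transformation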
|
openiitbombayx/edx-platform | refs/heads/master | lms/djangoapps/lms_xblock/admin.py | 173 | """
Django admin dashboard configuration for LMS XBlock infrastructure.
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
admin.site.register(XBlockAsidesConfig, ConfigurationModelAdmin)
|
cw0100/cwse | refs/heads/master | nodejs/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 899 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor =='win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
filename = 'dump.json'
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
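# The resulting dump.json maps each fully qualified target name to the list of
# targets it directly depends on -- a flat adjacency-list view of the
# dependency graph walked above.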
|
lexyan/SickBeard | refs/heads/master | lib/hachoir_core/field/timestamp.py | 90 | from lib.hachoir_core.tools import (humanDatetime, humanDuration,
timestampUNIX, timestampMac32, timestampUUID60,
timestampWin64, durationWin64)
from lib.hachoir_core.field import Bits, FieldSet
from datetime import datetime
class GenericTimestamp(Bits):
def __init__(self, parent, name, size, description=None):
Bits.__init__(self, parent, name, size, description)
def createDisplay(self):
return humanDatetime(self.value)
def createRawDisplay(self):
value = Bits.createValue(self)
return unicode(value)
def __nonzero__(self):
return Bits.createValue(self) != 0
def timestampFactory(cls_name, handler, size):
class Timestamp(GenericTimestamp):
def __init__(self, parent, name, description=None):
GenericTimestamp.__init__(self, parent, name, size, description)
def createValue(self):
value = Bits.createValue(self)
return handler(value)
cls = Timestamp
cls.__name__ = cls_name
return cls
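# The concrete timestamp field classes below are produced by timestampFactory:
# each binds a fixed bit width to a conversion handler, e.g. TimestampUnix32
# reads 32 bits and interprets them as seconds since the Unix epoch.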
TimestampUnix32 = timestampFactory("TimestampUnix32", timestampUNIX, 32)
TimestampUnix64 = timestampFactory("TimestampUnix64", timestampUNIX, 64)
TimestampMac32 = timestampFactory("TimestampUnix32", timestampMac32, 32)
TimestampUUID60 = timestampFactory("TimestampUUID60", timestampUUID60, 60)
TimestampWin64 = timestampFactory("TimestampWin64", timestampWin64, 64)
class TimeDateMSDOS32(FieldSet):
"""
32-bit MS-DOS timestamp (16-bit time, 16-bit date)
"""
static_size = 32
def createFields(self):
# TODO: Create type "MSDOS_Second" : value*2
yield Bits(self, "second", 5, "Second/2")
yield Bits(self, "minute", 6)
yield Bits(self, "hour", 5)
yield Bits(self, "day", 5)
yield Bits(self, "month", 4)
# TODO: Create type "MSDOS_Year" : value+1980
yield Bits(self, "year", 7, "Number of year after 1980")
def createValue(self):
return datetime(
1980+self["year"].value, self["month"].value, self["day"].value,
self["hour"].value, self["minute"].value, 2*self["second"].value)
def createDisplay(self):
return humanDatetime(self.value)
class DateTimeMSDOS32(TimeDateMSDOS32):
"""
32-bit MS-DOS timestamp (16-bit date, 16-bit time)
"""
def createFields(self):
yield Bits(self, "day", 5)
yield Bits(self, "month", 4)
yield Bits(self, "year", 7, "Number of year after 1980")
yield Bits(self, "second", 5, "Second/2")
yield Bits(self, "minute", 6)
yield Bits(self, "hour", 5)
class TimedeltaWin64(GenericTimestamp):
def __init__(self, parent, name, description=None):
GenericTimestamp.__init__(self, parent, name, 64, description)
def createDisplay(self):
return humanDuration(self.value)
def createValue(self):
value = Bits.createValue(self)
return durationWin64(value)
|
frouty/odoo_oph | refs/heads/dev_70 | addons/web_hello/__openerp__.py | 69 | {
'name': 'Hello',
'category': 'Hidden',
'description':"""
OpenERP Web example module.
===========================
""",
'version': '2.0',
'depends': [],
'js': ['static/*/*.js', 'static/*/js/*.js'],
'css': [],
'auto_install': False,
'web_preload': False,
}
|
zdary/intellij-community | refs/heads/master | python/testData/copyPaste/singleLine/Indent12.dst.py | 664 | class C:
def foo(self):
<caret> y = 2
|
acabey/acabey.github.io | refs/heads/master | projects/demos/engineering.purdue.edu/scriptingwithobjects/swocode/chap10/KeyError.py | 1 | #!/usr/local/bin/python
# KeyError.py
x = {'a' : 1, 'b' : 2} #(A)
print x['a'] # 1 #(B)
print x['b'] # 2 #(C)
print x['c'] # KeyError: 'c' #(D)
|
psav/cfme_tests | refs/heads/master | cfme/utils/net.py | 2 | from collections import defaultdict
import socket
import os
import re
from cfme.fixtures.pytest_store import store
from cfme.utils.log import logger
_ports = defaultdict(dict)
_dns_cache = {}
ip_address = re.compile(
r"^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
def random_port(tcp=True):
"""Get a random port number for making a socket
Args:
tcp: Return a TCP port number if True, UDP if False
This may not be reliable at all due to an inherent race condition. This works
by creating a socket on an ephemeral port, inspecting it to see what port was used,
closing it, and returning that port number. In the time between closing the socket
and opening a new one, it's possible for the OS to reopen that port for another purpose.
In practical testing, this race condition did not result in a failure to (re)open the
returned port number, making this solution squarely "good enough for now".
"""
# Port 0 will allocate an ephemeral port
socktype = socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM
s = socket.socket(socket.AF_INET, socktype)
s.bind(('', 0))
addr, port = s.getsockname()
s.close()
return port
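# Example usage (kept as a comment so nothing runs at import time): grab a port and
# immediately bind a listener to it. Because of the race condition described in the
# docstring, the bind can still fail in principle, so callers should be ready to retry.
#
#     port = random_port()
#     listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     listener.bind(('', port))   # may raise socket.error if the port got reused
#     listener.listen(1)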
def my_ip_address(http=False):
"""Get the ip address of the host running tests using the service listed in cfme_data['ip_echo']
The ip echo endpoint is expected to write the ip address to the socket and close the
connection. See a working example of this in :py:func:`ip_echo_socket`.
"""
# the pytest store does this work, it's included here for convenience
return store.my_ip_address
def ip_echo_socket(port=32123):
"""A simple socket server, for use with :py:func:`my_ip_address`"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', port))
s.listen(0)
while True:
conn, addr = s.accept()
conn.sendall(addr[0])
conn.close()
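# Client-side sketch of the protocol ip_echo_socket() speaks (an illustration only;
# my_ip_address() above actually delegates to the pytest store, and the default host
# used here is made up): connect, read until the server closes the connection, and
# treat the payload as this host's externally visible IP address.
def _read_echoed_ip(host='ip-echo.example.com', port=32123):
    sock = socket.create_connection((host, port), timeout=10)
    chunks = []
    try:
        while True:
            data = sock.recv(64)
            if not data:
                break
            chunks.append(data)
    finally:
        sock.close()
    return ''.join(chunks).strip()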
def net_check(port, addr=None, force=False):
"""Checks the availablility of a port"""
port = int(port)
if not addr:
addr = store.current_appliance.hostname
if port not in _ports[addr] or force:
# First try DNS resolution
try:
addr = socket.gethostbyname(addr)
# Then try to connect to the port
try:
socket.create_connection((addr, port), timeout=10)
_ports[addr][port] = True
except socket.error:
_ports[addr][port] = False
        except Exception:
            # DNS resolution (or anything else unexpected) failed
            _ports[addr][port] = False
return _ports[addr][port]
def net_check_remote(port, addr=None, machine_addr=None, ssh_creds=None, force=False):
"""Checks the availability of a port from outside using another machine (over SSH)"""
from cfme.utils.ssh import SSHClient
port = int(port)
if not addr:
addr = my_ip_address()
if port not in _ports[addr] or force:
if not machine_addr:
machine_addr = store.current_appliance.hostname
if not ssh_creds:
ssh_client = store.current_appliance.ssh_client
else:
ssh_client = SSHClient(
hostname=machine_addr,
username=ssh_creds['username'],
password=ssh_creds['password']
)
with ssh_client:
# on exception => fails with return code 1
cmd = '''python -c "
import sys, socket
addr = socket.gethostbyname('%s')
socket.create_connection((addr, %d), timeout=10)
sys.exit(0)
"''' % (addr, port)
result = ssh_client.run_command(cmd)
_ports[addr][port] = result.success
return _ports[addr][port]
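# Example call (kept as a comment; the address and credentials are made up): ask the
# appliance to verify that this test host is reachable on port 8000, using explicit
# SSH credentials instead of the default appliance client.
#
#     reachable = net_check_remote(
#         8000,
#         addr='10.0.0.5',
#         machine_addr='appliance.example.com',
#         ssh_creds={'username': 'root', 'password': 'secret'},
#         force=True,
#     )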
def resolve_hostname(hostname, force=False):
"""Cached DNS resolver. If the hostname does not resolve to an IP, returns None."""
if hostname not in _dns_cache or force:
try:
_dns_cache[hostname] = socket.gethostbyname(hostname)
except socket.gaierror:
_dns_cache[hostname] = None
return _dns_cache[hostname]
def resolve_ips(host_iterable, force_dns=False):
"""Takes list of hostnames, ips and another things. If the item is not an IP, it will be tried
to be converted to an IP. If that succeeds, it is appended to the set together with original
hostname. If it can't be resolved, just the original hostname is appended.
"""
result = set([])
for host in map(str, host_iterable):
        result.add(host)  # always keep the original item (hostname or IP)
if ip_address.match(host) is None:
ip = resolve_hostname(host, force=force_dns)
if ip is not None:
result.add(ip)
return result
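# Worked example (hostnames and addresses are illustrative): every input is kept, and
# anything that resolves also contributes its IP, so callers can match either form
# against the result.
#
#     resolve_ips(['10.1.1.1', 'appliance.example.com'])
#     -> {'10.1.1.1', 'appliance.example.com', '10.1.1.42'}   # assuming it resolves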
def is_pingable(ip_addr):
"""verifies the specified ip_address is reachable or not.
Args:
ip_addr: ip_address to verify the PING.
returns: return True is ip_address is pinging else returns False.
"""
try:
status = os.system("ping -c1 -w2 {}".format(ip_addr))
if status == 0:
logger.info('IP: %s is UP !', ip_addr)
return True
logger.info('IP: %s is DOWN !', ip_addr)
return False
except Exception as e:
logger.exception(e)
return False
|
NewPresident1/kitsune | refs/heads/master | kitsune/questions/tests/test_utils.py | 16 | from nose.tools import eq_
from kitsune.questions.models import Question, Answer
from kitsune.questions.tests import question, answer
from kitsune.questions.utils import (
num_questions, num_answers, num_solutions, mark_content_as_spam)
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import user
class ContributionCountTestCase(TestCase):
def test_num_questions(self):
"""Answers are counted correctly on a user."""
u = user(save=True)
eq_(num_questions(u), 0)
q1 = question(creator=u, save=True)
eq_(num_questions(u), 1)
q2 = question(creator=u, save=True)
eq_(num_questions(u), 2)
q1.delete()
eq_(num_questions(u), 1)
q2.delete()
eq_(num_questions(u), 0)
def test_num_answers(self):
u = user(save=True)
q = question(save=True)
eq_(num_answers(u), 0)
a1 = answer(creator=u, question=q, save=True)
eq_(num_answers(u), 1)
a2 = answer(creator=u, question=q, save=True)
eq_(num_answers(u), 2)
a1.delete()
eq_(num_answers(u), 1)
a2.delete()
eq_(num_answers(u), 0)
def test_num_solutions(self):
u = user(save=True)
q1 = question(save=True)
q2 = question(save=True)
a1 = answer(creator=u, question=q1, save=True)
a2 = answer(creator=u, question=q2, save=True)
eq_(num_solutions(u), 0)
q1.solution = a1
q1.save()
eq_(num_solutions(u), 1)
q2.solution = a2
q2.save()
eq_(num_solutions(u), 2)
q1.solution = None
q1.save()
eq_(num_solutions(u), 1)
a2.delete()
eq_(num_solutions(u), 0)
class FlagUserContentAsSpamTestCase(TestCase):
def test_flag_content_as_spam(self):
# Create some questions and answers by the user.
u = user(save=True)
question(creator=u, save=True)
question(creator=u, save=True)
answer(creator=u, save=True)
answer(creator=u, save=True)
answer(creator=u, save=True)
# Verify they are not marked as spam yet.
eq_(2, Question.objects.filter(is_spam=False, creator=u).count())
eq_(0, Question.objects.filter(is_spam=True, creator=u).count())
eq_(3, Answer.objects.filter(is_spam=False, creator=u).count())
eq_(0, Answer.objects.filter(is_spam=True, creator=u).count())
# Flag content as spam and verify it is updated.
mark_content_as_spam(u, user(save=True))
eq_(0, Question.objects.filter(is_spam=False, creator=u).count())
eq_(2, Question.objects.filter(is_spam=True, creator=u).count())
eq_(0, Answer.objects.filter(is_spam=False, creator=u).count())
eq_(3, Answer.objects.filter(is_spam=True, creator=u).count())
|
brainelectronics/towerdefense | refs/heads/master | examples/pyglet/image/codecs/quicktime.py | 43 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: pil.py 163 2006-11-13 04:15:46Z Alex.Holkner $'
import sys
from ctypes import *
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
from pyglet.window.carbon import carbon, quicktime, _oscheck
from pyglet.libs.darwin.constants import _name
from pyglet.libs.darwin.types import *
Handle = POINTER(POINTER(c_byte))
GWorldPtr = c_void_p
carbon.NewHandle.restype = Handle
HandleDataHandlerSubType = _name('hndl')
PointerDataHandlerSubType = _name('ptr ')
kDataHCanRead = 1
kDataRefExtensionFileName = _name('fnam')
kDataRefExtensionMIMEType = _name('mime')
ComponentInstance = c_void_p
k1MonochromePixelFormat = 0x00000001
k2IndexedPixelFormat = 0x00000002
k4IndexedPixelFormat = 0x00000004
k8IndexedPixelFormat = 0x00000008
k16BE555PixelFormat = 0x00000010
k24RGBPixelFormat = 0x00000018
k32ARGBPixelFormat = 0x00000020
k32BGRAPixelFormat = _name('BGRA')
k1IndexedGrayPixelFormat = 0x00000021
k2IndexedGrayPixelFormat = 0x00000022
k4IndexedGrayPixelFormat = 0x00000024
k8IndexedGrayPixelFormat = 0x00000028
kNativeEndianPixMap = 1 << 8
kGraphicsImporterDontDoGammaCorrection = 1 << 0
kGraphicsImporterDontUseColorMatching = 1 << 3
newMovieActive = 1
noErr = 0
movieTrackMediaType = 1 << 0
movieTrackCharacteristic = 1 << 1
movieTrackEnabledOnly = 1 << 2
VisualMediaCharacteristic = _name('eyes')
nextTimeMediaSample = 1
class PointerDataRefRecord(Structure):
_fields_ = [
('data', c_void_p),
('dataLength', c_long)
]
def Str255(value):
return create_string_buffer(chr(len(value)) + value)
class QuickTimeImageDecoder(ImageDecoder):
def get_file_extensions(self):
# Only most common ones shown here
return ['.bmp', '.cur', '.gif', '.ico', '.jpg', '.jpeg', '.pcx', '.png',
'.tga', '.tif', '.tiff', '.xbm', '.xpm']
def get_animation_file_extensions(self):
return ['.gif']
def _get_data_ref(self, file, filename):
self._data_hold = data = create_string_buffer(file.read())
dataref = carbon.NewHandle(sizeof(PointerDataRefRecord))
datarec = cast(dataref,
POINTER(POINTER(PointerDataRefRecord))).contents.contents
datarec.data = addressof(data)
datarec.dataLength = len(data)
self._data_handler_holder = data_handler = ComponentInstance()
r = quicktime.OpenADataHandler(dataref, PointerDataHandlerSubType,
None, 0, None, kDataHCanRead, byref(data_handler))
_oscheck(r)
extension_handle = Handle()
self._filename_hold = filename = Str255(filename)
r = carbon.PtrToHand(filename, byref(extension_handle), len(filename))
r = quicktime.DataHSetDataRefExtension(data_handler, extension_handle,
kDataRefExtensionFileName)
_oscheck(r)
quicktime.DisposeHandle(extension_handle)
quicktime.DisposeHandle(dataref)
dataref = c_void_p()
r = quicktime.DataHGetDataRef(data_handler, byref(dataref))
_oscheck(r)
quicktime.CloseComponent(data_handler)
return dataref
def _get_formats(self):
# TODO choose 24 bit where appropriate.
if sys.byteorder == 'big':
format = 'ARGB'
qtformat = k32ARGBPixelFormat
else:
format = 'BGRA'
qtformat = k32BGRAPixelFormat
return format, qtformat
def decode(self, file, filename):
dataref = self._get_data_ref(file, filename)
importer = ComponentInstance()
quicktime.GetGraphicsImporterForDataRef(dataref,
PointerDataHandlerSubType, byref(importer))
if not importer:
raise ImageDecodeException(filename or file)
rect = Rect()
quicktime.GraphicsImportGetNaturalBounds(importer, byref(rect))
width = rect.right
height = rect.bottom
format, qtformat = self._get_formats()
buffer = (c_byte * (width * height * len(format)))()
world = GWorldPtr()
quicktime.QTNewGWorldFromPtr(byref(world), qtformat,
byref(rect), c_void_p(), c_void_p(), 0, buffer,
len(format) * width)
flags = (kGraphicsImporterDontUseColorMatching |
kGraphicsImporterDontDoGammaCorrection)
quicktime.GraphicsImportSetFlags(importer, flags)
quicktime.GraphicsImportSetGWorld(importer, world, c_void_p())
result = quicktime.GraphicsImportDraw(importer)
quicktime.DisposeGWorld(world)
quicktime.CloseComponent(importer)
if result != 0:
raise ImageDecodeException(filename or file)
pitch = len(format) * width
return ImageData(width, height, format, buffer, -pitch)
def decode_animation(self, file, filename):
# TODO: Stop playing chicken with the GC
# TODO: Cleanup in errors
quicktime.EnterMovies()
data_ref = self._get_data_ref(file, filename)
if not data_ref:
raise ImageDecodeException(filename or file)
movie = c_void_p()
id = c_short()
result = quicktime.NewMovieFromDataRef(byref(movie),
newMovieActive,
0,
data_ref,
PointerDataHandlerSubType)
if not movie:
#_oscheck(result)
raise ImageDecodeException(filename or file)
quicktime.GoToBeginningOfMovie(movie)
time_scale = float(quicktime.GetMovieTimeScale(movie))
format, qtformat = self._get_formats()
# Get movie width and height
rect = Rect()
quicktime.GetMovieBox(movie, byref(rect))
width = rect.right
height = rect.bottom
pitch = len(format) * width
# Set gworld
buffer = (c_byte * (width * height * len(format)))()
world = GWorldPtr()
quicktime.QTNewGWorldFromPtr(byref(world), qtformat,
byref(rect), c_void_p(), c_void_p(), 0, buffer,
len(format) * width)
quicktime.SetGWorld(world, 0)
quicktime.SetMovieGWorld(movie, world, 0)
visual = quicktime.GetMovieIndTrackType(movie, 1,
VisualMediaCharacteristic,
movieTrackCharacteristic)
if not visual:
raise ImageDecodeException('No video track')
time = 0
interesting_time = c_int()
quicktime.GetTrackNextInterestingTime(
visual,
nextTimeMediaSample,
time,
1,
byref(interesting_time),
None)
duration = interesting_time.value / time_scale
frames = []
while time >= 0:
result = quicktime.GetMoviesError()
if result == noErr:
# force redraw
result = quicktime.UpdateMovie(movie)
if result == noErr:
# process movie
quicktime.MoviesTask(movie, 0)
result = quicktime.GetMoviesError()
_oscheck(result)
buffer_copy = (c_byte * len(buffer))()
memmove(buffer_copy, buffer, len(buffer))
image = ImageData(width, height, format, buffer_copy, -pitch)
frames.append(AnimationFrame(image, duration))
interesting_time = c_int()
duration = c_int()
quicktime.GetTrackNextInterestingTime(
visual,
nextTimeMediaSample,
time,
1,
byref(interesting_time),
byref(duration))
quicktime.SetMovieTimeValue(movie, interesting_time)
time = interesting_time.value
duration = duration.value / time_scale
if duration <= 0.01:
duration = 0.1
quicktime.DisposeMovie(movie)
carbon.DisposeHandle(data_ref)
quicktime.ExitMovies()
return Animation(frames)
def get_decoders():
return [QuickTimeImageDecoder()]
def get_encoders():
return []
|