code | repo_name | path | language | license | size
stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
---|---|---|---|---|---|
from django.conf.urls import patterns, include, url
from django.contrib import admin
import questproj.views as views
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^googlefd8980378f4a07d2.html$', views.google_verify),
# url(r'', include('django_stormpath.urls')),
# url(r'^questapp/', include('questapp.urls')), # put questapp at /questapp
url(r'^', include('questapp.urls')), # put questapp at /
url(r'^admin-page/', views.AdminPageFormView.as_view(), name="admin-page"),
url(r'^scoreboard', views.ScoreboardView.as_view(), name="scoreboard"),
url(r'^admin/', include(admin.site.urls)),
url(r'^about$', views.AboutView.as_view(), name="about"),
url(r'^user/account/$', views.UserAccountView.as_view(), name='user_account'),
)
| hillscottc/quest | questproj/urls.py | Python | gpl-2.0 | 801 |
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import unittest, os, argparse
def find_tests():
from calibre.utils.run_tests import find_tests_in_dir
base = os.path.dirname(os.path.abspath(__file__))
return find_tests_in_dir(base)
def run_tests(find_tests=find_tests, for_build=False):
if not for_build:
parser = argparse.ArgumentParser()
parser.add_argument('name', nargs='?', default=None,
help='The name of the test to run')
args = parser.parse_args()
if not for_build and args.name and args.name.startswith('.'):
tests = find_tests()
q = args.name[1:]
if not q.startswith('test_'):
q = 'test_' + q
ans = None
try:
for suite in tests:
for test in suite._tests:
if test.__class__.__name__ == 'ModuleImportFailure':
raise Exception('Failed to import a test module: %s' % test)
for s in test:
if s._testMethodName == q:
ans = s
raise StopIteration()
except StopIteration:
pass
if ans is None:
print ('No test named %s found' % args.name)
raise SystemExit(1)
tests = ans
else:
tests = unittest.defaultTestLoader.loadTestsFromName(args.name) if not for_build and args.name else find_tests()
r = unittest.TextTestRunner
if for_build:
r = r(verbosity=0, buffer=True, failfast=True)
else:
r = r(verbosity=4)
result = r.run(tests)
    if for_build and (result.errors or result.failures):
raise SystemExit(1)
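# Usage sketch (inferred from the startswith('.') branch above, not part of
# the original file): running "python main.py .tokenizer" executes only the
# method test_tokenizer (the name is hypothetical), while running with no
# argument executes every test that find_tests() discovers.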
if __name__ == '__main__':
run_tests()
| jelly/calibre | src/tinycss/tests/main.py | Python | gpl-3.0 | 1,952 |
# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import *
from functools import partial
from os import strerror
from platform import machine, system
from stat import S_IFDIR
from traceback import print_exc
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
_system = system()
if _system in ('Darwin', 'FreeBSD'):
_libiconv = CDLL(
find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(
c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(
c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
_machine = machine()
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t)]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(
c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(
c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64)]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
POINTER(fuse_file_info))),
        ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
                            c_off_t, POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('setxattr', setxattr_t),
('getxattr', getxattr_t),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
        ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp,
                              CFUNCTYPE(c_int, c_voidp, c_char_p,
                                        POINTER(c_stat), c_off_t),
                              c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(
fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(
fuse_file_info))),
('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(
fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
POINTER(fuse_file_info))),
('lock', CFUNCTYPE(
c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))]
def time_of_timespec(ts):
return ts.tv_sec + ts.tv_nsec / 10 ** 9
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime'):
timespec = getattr(st, key + 'spec')
timespec.tv_sec = int(val)
timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
elif hasattr(st, key):
setattr(st, key, val)
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
_libfuse = CDLL(_libfuse_path)
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
def fuse_get_context():
"""Returns a (uid, gid, pid) tuple"""
ctxp = _libfuse.fuse_get_context()
ctx = ctxp.contents
return ctx.uid, ctx.gid, ctx.pid
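# Illustrative use (not part of the original module): inside a filesystem
# operation, the identity of the calling process can be read as
#   uid, gid, pid = fuse_get_context()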
class FuseOSError(OSError):
def __init__(self, errno):
super(FuseOSError, self).__init__(errno, strerror(errno))
class FUSE(object):
"""This class is the lower level interface and should not be subclassed
under normal use. Its methods are called by fuse.
Assumes API version 2.6 or later."""
def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
"""Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc."""
self.operations = operations
self.raw_fi = raw_fi
args = ['fuse']
if kwargs.pop('foreground', False):
args.append('-f')
if kwargs.pop('debug', False):
args.append('-d')
if kwargs.pop('nothreads', False):
args.append('-s')
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(key if val == True else '%s=%s' % (key, val)
for key, val in kwargs.items()))
args.append(mountpoint)
argv = (c_char_p * len(args))(*args)
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
op = partial(self._wrapper_, getattr(self, name))
setattr(fuse_ops, name, prototype(op))
err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
def _wrapper_(self, func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
return func(*args, **kwargs) or 0
except OSError, e:
return -(e.errno or EFAULT)
except:
print_exc()
return -EFAULT
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
def mknod(self, path, mode, dev):
return self.operations('mknod', path, mode, dev)
def mkdir(self, path, mode):
return self.operations('mkdir', path, mode)
def unlink(self, path):
return self.operations('unlink', path)
def rmdir(self, path):
return self.operations('rmdir', path)
def symlink(self, source, target):
return self.operations('symlink', target, source)
def rename(self, old, new):
return self.operations('rename', old, new)
def link(self, source, target):
return self.operations('link', target, source)
def chmod(self, path, mode):
return self.operations('chmod', path, mode)
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
uid = -1
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path, uid, gid)
def truncate(self, path, length):
return self.operations('truncate', path, length)
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('open', path, fi)
else:
fi.fh = self.operations('open', path, fi.flags)
return 0
def read(self, path, buf, size, offset, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
ret = self.operations('read', path, size, offset, fh)
if not ret:
return 0
        # Report the number of bytes actually produced; returning `size`
        # unconditionally would mis-report short reads (e.g. at EOF).
        retsize = min(len(ret), size)
        memmove(buf, create_string_buffer(ret[:retsize], retsize), retsize)
        return retsize
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('write', path, data, offset, fh)
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path)
for key, val in attrs.items():
if hasattr(stv, key):
setattr(stv, key, val)
return 0
def flush(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('flush', path, fh)
def release(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('release', path, fh)
def fsync(self, path, datasync, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('fsync', path, datasync, fh)
def setxattr(self, path, name, value, size, options, *args):
data = string_at(value, size)
return self.operations('setxattr', path, name, data, options, *args)
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path, name, *args)
retsize = len(ret)
buf = create_string_buffer(ret, retsize) # Does not add trailing 0
if bool(value):
if retsize > size:
return -ERANGE
memmove(value, buf, retsize)
return retsize
def listxattr(self, path, namebuf, size):
ret = self.operations('listxattr', path)
buf = create_string_buffer('\x00'.join(ret)) if ret else ''
bufsize = len(buf)
if bool(namebuf):
if bufsize > size:
return -ERANGE
memmove(namebuf, buf, bufsize)
return bufsize
def removexattr(self, path, name):
return self.operations('removexattr', path, name)
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir', path)
return 0
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path, fip.contents.fh):
if isinstance(item, (str, unicode)):
name, st, offset = item, None, 0
else:
name, attrs, offset = item
if attrs:
st = c_stat()
set_st_attrs(st, attrs)
else:
st = None
if filler(buf, name, st, offset) != 0:
break
return 0
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path, fip.contents.fh)
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path, datasync, fip.contents.fh)
def init(self, conn):
return self.operations('init', '/')
def destroy(self, private_data):
return self.operations('destroy', '/')
def access(self, path, amode):
return self.operations('access', path, amode)
def create(self, path, mode, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('create', path, mode, fi)
else:
fi.fh = self.operations('create', path, mode)
return 0
def ftruncate(self, path, length, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('truncate', path, length, fh)
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
fh = fip and (fip.contents if self.raw_fi else fip.contents.fh)
attrs = self.operations('getattr', path, fh)
set_st_attrs(st, attrs)
return 0
def lock(self, path, fip, cmd, lock):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('lock', path, fh, cmd, lock)
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
mtime = time_of_timespec(buf.contents.modtime)
times = (atime, mtime)
else:
times = None
return self.operations('utimens', path, times)
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path, blocksize, idx)
class Operations(object):
"""This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception
on error.
When in doubt of what an operation should do, check the FUSE header
file or the corresponding system call man page."""
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
def access(self, path, amode):
return 0
bmap = None
def chmod(self, path, mode):
raise FuseOSError(EROFS)
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
def create(self, path, mode, fi=None):
"""When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0."""
raise FuseOSError(EROFS)
def destroy(self, path):
"""Called on filesystem destruction. Path is always /"""
pass
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
st_atime, st_mtime and st_ctime should be floats.
        NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
def init(self, path):
"""Called on filesystem initialization. Path is always /
Use it instead of __init__ if you start threads on initialization."""
pass
def link(self, target, source):
raise FuseOSError(EROFS)
def listxattr(self, path):
return []
lock = None
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
def open(self, path, flags):
"""When raw_fi is False (default case), open should return a numerical
file handle.
When raw_fi is True the signature of open becomes:
open(self, path, fi)
and the file handle should be set directly."""
return 0
def opendir(self, path):
"""Returns a numerical file handle."""
return 0
def read(self, path, size, offset, fh):
"""Returns a string containing the data requested."""
raise FuseOSError(EIO)
def readdir(self, path, fh):
"""Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr."""
return ['.', '..']
def readlink(self, path):
raise FuseOSError(ENOENT)
def release(self, path, fh):
return 0
def releasedir(self, path, fh):
return 0
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
def rename(self, old, new):
raise FuseOSError(EROFS)
def rmdir(self, path):
raise FuseOSError(EROFS)
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
def statfs(self, path):
"""Returns a dictionary with keys identical to the statvfs C structure
of statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512)."""
return {}
def symlink(self, target, source):
raise FuseOSError(EROFS)
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
def unlink(self, path):
raise FuseOSError(EROFS)
def utimens(self, path, times=None):
"""Times is a (atime, mtime) tuple. If None use current time."""
return 0
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
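# A minimal sketch (illustrative, not part of the original module) of how
# Operations is meant to be subclassed; the class name HelloFS, the file
# name "hello" and its contents are hypothetical. Mount it with e.g.
# FUSE(HelloFS(), '/mnt/hello', foreground=True).
from stat import S_IFREG

class HelloFS(Operations):
    def getattr(self, path, fh=None):
        if path == '/':
            return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
        if path == '/hello':
            return dict(st_mode=(S_IFREG | 0444), st_nlink=1, st_size=13)
        raise FuseOSError(ENOENT)

    def readdir(self, path, fh):
        return ['.', '..', 'hello']

    def read(self, path, size, offset, fh):
        # 'Hello, FUSE!\n' is 13 bytes, matching st_size above
        return 'Hello, FUSE!\n'[offset:offset + size]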
class LoggingMixIn:
def __call__(self, op, path, *args):
print '->', op, path, repr(args)
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError, e:
ret = str(e)
raise
finally:
print '<-', op, repr(ret)
| Erethon/synnefo | contrib/snf-pithos-tools/pithos/tools/lib/fuse.py | Python | gpl-3.0 | 22,723 |
# Sulley EXception Class
class SullyRuntimeError(Exception):
    pass
| 0x90/wifuzzit | sulley/sex.py | Python | gpl-3.0 | 70 |
from social.tests.models import User
from social.tests.actions.actions import BaseActionTest
class LoginActionTest(BaseActionTest):
def test_login(self):
self.do_login()
def test_login_with_partial_pipeline(self):
self.do_login_with_partial_pipeline()
def test_fields_stored_in_session(self):
self.strategy.set_settings({
'SOCIAL_AUTH_FIELDS_STORED_IN_SESSION': ['foo', 'bar']
})
self.strategy.set_request_data({'foo': '1', 'bar': '2'}, self.backend)
self.do_login()
self.assertEqual(self.strategy.session_get('foo'), '1')
self.assertEqual(self.strategy.session_get('bar'), '2')
def test_redirect_value(self):
self.strategy.set_request_data({'next': '/after-login'}, self.backend)
redirect = self.do_login(after_complete_checks=False)
self.assertEqual(redirect.url, '/after-login')
def test_login_with_invalid_partial_pipeline(self):
def before_complete():
partial = self.strategy.session_get('partial_pipeline')
partial['backend'] = 'foobar'
self.strategy.session_set('partial_pipeline', partial)
self.do_login_with_partial_pipeline(before_complete)
def test_new_user(self):
self.strategy.set_settings({
'SOCIAL_AUTH_NEW_USER_REDIRECT_URL': '/new-user'
})
redirect = self.do_login(after_complete_checks=False)
self.assertEqual(redirect.url, '/new-user')
def test_inactive_user(self):
self.strategy.set_settings({
'SOCIAL_AUTH_INACTIVE_USER_URL': '/inactive'
})
User.set_active(False)
redirect = self.do_login(after_complete_checks=False)
self.assertEqual(redirect.url, '/inactive')
def test_invalid_user(self):
self.strategy.set_settings({
'SOCIAL_AUTH_LOGIN_ERROR_URL': '/error',
'SOCIAL_AUTH_PIPELINE': (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'social.tests.pipeline.remove_user'
)
})
redirect = self.do_login(after_complete_checks=False)
self.assertEqual(redirect.url, '/error')
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/social/tests/actions/test_login.py | Python | agpl-3.0 | 2,639 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
import gradient_checker
from decorator_helper import prog_scope
class TestElementwiseMulDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape, False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_mul(x, y)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[:-1], False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_mul(x, y, axis=0)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseAddDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape, False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_add(x, y)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[:-1], False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_add(x, y, axis=0)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseSubDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape, False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_sub(x, y)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[:-1], False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_sub(x, y, axis=0)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseDivDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.0001
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape, False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_div(x, y, axis=0)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr[np.abs(y_arr) < 0.005] = 0.02
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.0001
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[1:-1], False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_div(x, y, axis=1)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[1:-1]).astype(dtype)
y_arr[np.abs(y_arr) < 0.005] = 0.02
gradient_checker.double_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps, atol=1e-3)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseAddTripleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape, False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_add(x, y)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
gradient_checker.triple_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseAddBroadcastTripleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[:-1], False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_add(x, y, axis=0)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)
gradient_checker.triple_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseMulTripleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape, False, dtype)
x.persistable = True
y.persistable = True
out = layers.elementwise_mul(x, y)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
gradient_checker.triple_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestElementwiseMulBroadcastTripleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
        # the shape of the input variable should be clearly specified, not include -1.
shape = [2, 3, 4, 5]
eps = 0.005
dtype = np.float64
x = layers.data('x', shape, False, dtype)
y = layers.data('y', shape[:-1], False, dtype)
x.persistable = True
y.persistable = True
        # Use elementwise_mul here to match the class name (the original used
        # elementwise_add, an apparent copy-paste slip).
        out = layers.elementwise_mul(x, y, axis=0)
x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)
gradient_checker.triple_grad_check(
[x, y], out, x_init=[x_arr, y_arr], place=place, eps=eps)
def test_grad(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
if __name__ == "__main__":
unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py | Python | apache-2.0 | 12,057 |
class Error(Exception):
pass
class PathError(Error, ValueError):
pass
class NotFoundError(Error, IOError):
pass
| codedcolors/pygrow | grow/pods/storage/errors.py | Python | mit | 122 |
#!/usr/bin/python2.2
import __future__
import os
import pexpect
import popen2
import re
import select
import sys
import time
import traceback
# swiped from doctest.py
class _SpoofOut:
def __init__(self):
self.clear()
def write(self, s):
self.buf.append(s)
def get(self):
guts = "".join(self.buf)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if guts and not guts.endswith("\n"):
guts = guts + "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return guts
def clear(self):
self.buf = []
if hasattr(self, "softspace"):
del self.softspace
def flush(self):
# JPython calls flush
pass
# Also shamelessly stolen from doctest. Get the future-flags
# associated with the future features that have been imported into
# globs.
def _extract_future_flags(globs):
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
import getopt
def getkwopt(argv,opt={},help=False):
"""
Get command line options and positional arguments.
Returns help text if help=True
Returns (kwopt,args) otherwise.
Sample input:
opt = {
'd': ('varisconf', "/var/isconf", "base of cache"),
'p': ('port', 9999, "port to listen on"),
'v': ('verbose', False, "verbose"),
}
Sample kwopt return value (with empty command line):
kwopt = {
'varisconf': "/var/isconf",
'port': 9999,
'verbose': False,
}
"""
kwopt = {}
optstr = ''
longopts = []
if help and not opt:
return ""
usagetxt = "options:\n"
for short in opt.keys():
long = opt[short][0]
default = opt[short][1]
desc = opt[short][2]
kwopt[long] = default
optstr += short
longopt = long
opthelp = " -%s, --%s" % (short,long)
if default is not True and default is not False:
optstr += ':'
longopt += '='
opthelp += '=' + str(default)
longopts.append(longopt)
sep=""
if len(opthelp) > 20:
sep="\n" + " " * 22
usagetxt += "%-22s%s%s\n" % (opthelp,sep,desc)
if help:
return usagetxt
(opts, args) = getopt.getopt(argv, optstr, longopts)
for (short,default) in opts:
short = short[1:] # strip off '-'
if default == '':
default = True
long = opt[short][0]
kwopt[long] = default
return (kwopt,args)
class docgen:
def main(self):
opt = {
'd': ('debug', False, "debug this script"),
'h': ('help', False, "this text"),
'v': ('verbose', False, "debug the test case"),
}
if '-h' in sys.argv:
print >>sys.stderr, getkwopt(sys.argv[1:],opt,help=True)
sys.exit(1)
(kwopt,args) = getkwopt(sys.argv[1:],opt)
modulepath = args[0]
libpath = "lib/python"
sys.path.append(libpath)
m = re.match(".*%s/(.*).py" % libpath, modulepath)
if not m:
raise "libpath (%s) not in module path (%s)" % (libpath,modulepath)
module = m.group(1).replace("/",".")
input = sys.stdin.readlines()
striplen = 0
if input[0].lstrip().startswith('>>> '):
striplen = 4
# remove old output lines
newinput = []
for i in range(len(input)):
line = input[i]
if re.match('\s*>>>\s+',line) \
or re.match('\s*\.\.\.\s+',line):
newinput.append(line)
input = newinput
indent = 0
firstline = True
code = ''
lastline = len(input) - 1
BLOCKSTART, BLOCKCONT, BLOCKEND = range(3)
state = BLOCKSTART
self.realout = sys.stdout
sys.stdout = fakeout = _SpoofOut()
# realerr = sys.stderr
# sys.stderr = fakeerr = _SpoofOut()
exec "import %s" % module
exec "globs = %s.__dict__" % module
compileflags = _extract_future_flags(globs)
for i in range(len(input)):
fakeout.clear()
icode = input[i]
lcode = icode.lstrip()
# show code
self.realout.write(icode)
# clean up input
if firstline:
firstline = False
indent = len(icode) - len(lcode)
self.indentstr = ' ' * indent
if kwopt['debug']: print >>sys.stderr, icode
scode = icode[indent+striplen:].rstrip()
# collect one complete code block
if state == BLOCKSTART:
code = scode
if state == BLOCKCONT:
# this is an indented continuation line
code += "\n" + scode
if i < lastline:
nextcode = input[i+1][indent+striplen:]
if nextcode and nextcode.startswith(' '):
# next line is continuation
state = BLOCKCONT
continue
state = BLOCKSTART
# kill doubled backslashes
code = eval(repr(code).replace('\\\\','\\'))
if not code:
continue
code += "\n"
# run it
try:
exec compile(code, "<string>", "single",
compileflags, 1) in globs
code = ''
out = fakeout.get()
except:
code = ''
out = fakeout.get()
if out:
self.wout(out)
exc_info = sys.exc_info()
exc_type, exc_val, tb = exc_info[:3]
if kwopt['verbose']:
out = traceback.format_exception(exc_type, exc_val, tb)
out = ''.join(out)
out = out.strip() + "\n"
self.wout(out)
else:
out = "Traceback (most recent call last):\n"
self.wout(out)
out = " (...doctest ignores traceback detail...)\n"
self.wout(out)
out = traceback.format_exception_only(exc_type, exc_val)[-1]
self.wout(out)
continue
if out:
self.wout(out)
def wout(self,out):
realout = self.realout
indentstr = self.indentstr
out = out.rstrip()
# put the doubled backslashes back
out = eval(repr(out).replace('\\\\','\\\\\\\\'))
# restore indent
out = out.replace('\n','\n' + indentstr)
realout.write(indentstr + out + "\n")
dg = docgen()
dg.main()
| stevegt/isconf4 | t/docgen.py | Python | gpl-2.0 | 7,279 |
'''
Requires paramiko >=1.8.0 (paramiko had an issue with multiprocessing prior
to this)
Example code showing how to use netmiko for multiprocessing. Create a
separate process for each ssh connection. Each subprocess executes a
'show version' command on the remote device. Use a multiprocessing.queue to
pass data from subprocess to parent process.
Only supports Python2
'''
# Catch Paramiko warnings about libgmp and RandomPool
import warnings
with warnings.catch_warnings(record=True) as w:
import paramiko
import multiprocessing
from datetime import datetime
import netmiko
from netmiko.ssh_exception import NetMikoTimeoutException, NetMikoAuthenticationException
# DEVICE_CREDS contains the devices to connect to
from DEVICE_CREDS import all_devices
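# The DEVICE_CREDS module is not shown here; all_devices is assumed to be a
# list of dicts in the form netmiko's SSH classes accept, e.g.:
# {'device_type': 'cisco_ios', 'ip': '10.10.10.1',
#  'username': 'admin', 'password': 'secret', 'port': 22}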
def print_output(results):
print "\nSuccessful devices:"
for a_dict in results:
for identifier,v in a_dict.iteritems():
(success, out_string) = v
if success:
print '\n\n'
print '#' * 80
print 'Device = {0}\n'.format(identifier)
print out_string
print '#' * 80
print "\n\nFailed devices:\n"
for a_dict in results:
for identifier,v in a_dict.iteritems():
(success, out_string) = v
if not success:
print 'Device failed = {0}'.format(identifier)
print "\nEnd time: " + str(datetime.now())
print
def worker_show_version(a_device, mp_queue):
'''
Return a dictionary where the key is the device identifier
Value is (success|fail(boolean), return_string)
'''
try:
a_device['port']
except KeyError:
a_device['port'] = 22
identifier = '{ip}:{port}'.format(**a_device)
return_data = {}
show_ver_command = 'show version'
SSHClass = netmiko.ssh_dispatcher(a_device['device_type'])
try:
net_connect = SSHClass(**a_device)
show_version = net_connect.send_command(show_ver_command)
except (NetMikoTimeoutException, NetMikoAuthenticationException) as e:
return_data[identifier] = (False, e)
# Add data to the queue (for parent process)
mp_queue.put(return_data)
return None
return_data[identifier] = (True, show_version)
mp_queue.put(return_data)
def main():
mp_queue = multiprocessing.Queue()
processes = []
print "\nStart time: " + str(datetime.now())
for a_device in all_devices:
p = multiprocessing.Process(target=worker_show_version, args=(a_device, mp_queue))
processes.append(p)
# start the work process
p.start()
# wait until the child processes have completed
for p in processes:
p.join()
# retrieve all the data from the queue
results = []
for p in processes:
results.append(mp_queue.get())
print_output(results)
if __name__ == '__main__':
    main()
| nonemaw/pynet | python_netmiko_example/multiprocess_example.py | Python | gpl-2.0 | 2,939 |
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="colorbar", parent_name="scatterternary.marker", **kwargs
):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter
ternary.marker.colorbar.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scatterternary.marker.colorbar.tickformatstop
defaults), sets the default property values to
use for elements of
scatterternary.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatterternary.mar
ker.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
scatterternary.marker.colorbar.title.font
instead. Sets this color bar's title font. Note
that the title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatterternary.marker.colorbar.title.side
instead. Determines the location of color bar's
title with respect to the color bar. Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
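# Usage sketch (assumed, mirroring how plotly wires these validators up
# internally; validate_coerce is inherited from the base validator classes):
#   v = ColorbarValidator()
#   colorbar = v.validate_coerce({'thickness': 20, 'ticklen': 6})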
| plotly/python-api | packages/python/plotly/plotly/validators/scatterternary/marker/_colorbar.py | Python | mit | 10,906 |
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy import and_
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import models_v2
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
from neutron.ipam import utils as ipam_utils
LOG = logging.getLogger(__name__)
class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
@staticmethod
def _generate_ip(context, subnets):
try:
return IpamNonPluggableBackend._try_generate_ip(context, subnets)
except n_exc.IpAddressGenerationFailure:
IpamNonPluggableBackend._rebuild_availability_ranges(context,
subnets)
return IpamNonPluggableBackend._try_generate_ip(context, subnets)
@staticmethod
def _try_generate_ip(context, subnets):
"""Generate an IP address.
The IP address will be generated from one of the subnets defined on
the network.
"""
range_qry = context.session.query(
models_v2.IPAvailabilityRange).join(
models_v2.IPAllocationPool).with_lockmode('update')
for subnet in subnets:
ip_range = range_qry.filter_by(subnet_id=subnet['id']).first()
if not ip_range:
LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) "
"allocated",
{'subnet_id': subnet['id'],
'cidr': subnet['cidr']})
continue
ip_address = ip_range['first_ip']
if ip_range['first_ip'] == ip_range['last_ip']:
# No more free indices on subnet => delete
LOG.debug("No more free IP's in slice. Deleting "
"allocation pool.")
context.session.delete(ip_range)
else:
# increment the first free
new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
ip_range['first_ip'] = new_first_ip
LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s "
"to %(last_ip)s",
{'ip_address': ip_address,
'first_ip': ip_address,
'last_ip': ip_range['last_ip']})
return {'ip_address': ip_address,
'subnet_id': subnet['id']}
raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id'])
@staticmethod
def _rebuild_availability_ranges(context, subnets):
"""Rebuild availability ranges.
This method is called only when there's no more IP available or by
_update_subnet_allocation_pools. Calling
_update_subnet_allocation_pools before calling this function deletes
the IPAllocationPools associated with the subnet that is updating,
which will result in deleting the IPAvailabilityRange too.
"""
ip_qry = context.session.query(
models_v2.IPAllocation).with_lockmode('update')
# PostgreSQL does not support select...for update with an outer join.
# No join is needed here.
pool_qry = context.session.query(
models_v2.IPAllocationPool).options(
orm.noload('available_ranges')).with_lockmode('update')
for subnet in sorted(subnets):
LOG.debug("Rebuilding availability ranges for subnet %s",
subnet)
# Create a set of all currently allocated addresses
ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
for i in ip_qry_results])
for pool in pool_qry.filter_by(subnet_id=subnet['id']):
# Create a set of all addresses in the pool
poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
pool['last_ip']))
# Use set difference to find free addresses in the pool
available = poolset - allocations
# Generator compacts an ip set into contiguous ranges
def ipset_to_ranges(ipset):
first, last = None, None
for cidr in ipset.iter_cidrs():
if last and last + 1 != cidr.first:
yield netaddr.IPRange(first, last)
first = None
first, last = first if first else cidr.first, cidr.last
if first:
yield netaddr.IPRange(first, last)
# Write the ranges to the db
for ip_range in ipset_to_ranges(available):
available_range = models_v2.IPAvailabilityRange(
allocation_pool_id=pool['id'],
first_ip=str(netaddr.IPAddress(ip_range.first)),
last_ip=str(netaddr.IPAddress(ip_range.last)))
context.session.add(available_range)
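    # Worked example of the compaction above (illustrative): for a pool
    # 10.0.0.1-10.0.0.10 with 10.0.0.3 and 10.0.0.4 already allocated, the
    # available IPSet compacts into two rows, 10.0.0.1-10.0.0.2 and
    # 10.0.0.5-10.0.0.10.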
@staticmethod
def _allocate_specific_ip(context, subnet_id, ip_address):
"""Allocate a specific IP address on the subnet."""
ip = int(netaddr.IPAddress(ip_address))
range_qry = context.session.query(
models_v2.IPAvailabilityRange).join(
models_v2.IPAllocationPool).with_lockmode('update')
results = range_qry.filter_by(subnet_id=subnet_id)
for ip_range in results:
first = int(netaddr.IPAddress(ip_range['first_ip']))
last = int(netaddr.IPAddress(ip_range['last_ip']))
if first <= ip <= last:
if first == last:
context.session.delete(ip_range)
return
elif first == ip:
new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
ip_range['first_ip'] = new_first_ip
return
elif last == ip:
new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
ip_range['last_ip'] = new_last_ip
return
else:
# Adjust the original range to end before ip_address
old_last_ip = ip_range['last_ip']
new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
ip_range['last_ip'] = new_last_ip
# Create a new second range for after ip_address
new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
new_ip_range = models_v2.IPAvailabilityRange(
allocation_pool_id=ip_range['allocation_pool_id'],
first_ip=new_first_ip,
last_ip=old_last_ip)
context.session.add(new_ip_range)
return
@staticmethod
def _check_unique_ip(context, network_id, subnet_id, ip_address):
"""Validate that the IP address on the subnet is not in use."""
ip_qry = context.session.query(models_v2.IPAllocation)
try:
ip_qry.filter_by(network_id=network_id,
subnet_id=subnet_id,
ip_address=ip_address).one()
except exc.NoResultFound:
return True
return False
def save_allocation_pools(self, context, subnet, allocation_pools):
for pool in allocation_pools:
first_ip = str(netaddr.IPAddress(pool.first, pool.version))
last_ip = str(netaddr.IPAddress(pool.last, pool.version))
ip_pool = models_v2.IPAllocationPool(subnet=subnet,
first_ip=first_ip,
last_ip=last_ip)
context.session.add(ip_pool)
ip_range = models_v2.IPAvailabilityRange(
ipallocationpool=ip_pool,
first_ip=first_ip,
last_ip=last_ip)
context.session.add(ip_range)
def allocate_ips_for_port_and_store(self, context, port, port_id):
network_id = port['port']['network_id']
ips = self._allocate_ips_for_port(context, port)
if ips:
for ip in ips:
ip_address = ip['ip_address']
subnet_id = ip['subnet_id']
self._store_ip_allocation(context, ip_address, network_id,
subnet_id, port_id)
def update_port_with_ips(self, context, db_port, new_port, new_mac):
changes = self.Changes(add=[], original=[], remove=[])
# Check if the IPs need to be updated
network_id = db_port['network_id']
if 'fixed_ips' in new_port:
original = self._make_port_dict(db_port, process_extensions=False)
changes = self._update_ips_for_port(
context, network_id,
original["fixed_ips"], new_port['fixed_ips'],
original['mac_address'], db_port['device_owner'])
# Update ips if necessary
for ip in changes.add:
IpamNonPluggableBackend._store_ip_allocation(
context, ip['ip_address'], network_id,
ip['subnet_id'], db_port.id)
self._update_db_port(context, db_port, new_port, network_id, new_mac)
return changes
def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
device_owner):
"""Test fixed IPs for port.
Check that configured subnets are valid prior to allocating any
IPs. Include the subnet_id in the result if only an IP address is
configured.
:raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
InvalidIpForSubnet
"""
fixed_ip_set = []
for fixed in fixed_ips:
found = False
if 'subnet_id' not in fixed:
if 'ip_address' not in fixed:
msg = _('IP allocation requires subnet_id or ip_address')
raise n_exc.InvalidInput(error_message=msg)
filter = {'network_id': [network_id]}
subnets = self._get_subnets(context, filters=filter)
for subnet in subnets:
if ipam_utils.check_subnet_ip(subnet['cidr'],
fixed['ip_address']):
found = True
subnet_id = subnet['id']
break
if not found:
raise n_exc.InvalidIpForNetwork(
ip_address=fixed['ip_address'])
else:
subnet = self._get_subnet(context, fixed['subnet_id'])
if subnet['network_id'] != network_id:
msg = (_("Failed to create port on network %(network_id)s"
", because fixed_ips included invalid subnet "
"%(subnet_id)s") %
{'network_id': network_id,
'subnet_id': fixed['subnet_id']})
raise n_exc.InvalidInput(error_message=msg)
subnet_id = subnet['id']
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
if 'ip_address' in fixed:
                # Ensure that the IPs are unique
if not IpamNonPluggableBackend._check_unique_ip(
context, network_id,
subnet_id, fixed['ip_address']):
raise n_exc.IpAddressInUse(net_id=network_id,
ip_address=fixed['ip_address'])
# Ensure that the IP is valid on the subnet
if (not found and
not ipam_utils.check_subnet_ip(subnet['cidr'],
fixed['ip_address'])):
raise n_exc.InvalidIpForSubnet(
ip_address=fixed['ip_address'])
if (is_auto_addr_subnet and
device_owner not in
constants.ROUTER_INTERFACE_OWNERS):
msg = (_("IPv6 address %(address)s can not be directly "
"assigned to a port on subnet %(id)s since the "
"subnet is configured for automatic addresses") %
{'address': fixed['ip_address'],
'id': subnet_id})
raise n_exc.InvalidInput(error_message=msg)
fixed_ip_set.append({'subnet_id': subnet_id,
'ip_address': fixed['ip_address']})
else:
# A scan for auto-address subnets on the network is done
# separately so that all such subnets (not just those
# listed explicitly here by subnet ID) are associated
# with the port.
if (device_owner in constants.ROUTER_INTERFACE_OWNERS or
device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or
not is_auto_addr_subnet):
fixed_ip_set.append({'subnet_id': subnet_id})
if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port:
            msg = _('Exceeded maximum number of fixed IPs per port')
raise n_exc.InvalidInput(error_message=msg)
return fixed_ip_set
def _allocate_fixed_ips(self, context, fixed_ips, mac_address):
"""Allocate IP addresses according to the configured fixed_ips."""
ips = []
        # Start with entries that asked for a specific IP, in case those
        # addresses would otherwise be next in line for allocation to
        # entries that did not ask for a specific IP.
fixed_ips.sort(key=lambda x: 'ip_address' not in x)
for fixed in fixed_ips:
subnet = self._get_subnet(context, fixed['subnet_id'])
is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet)
if 'ip_address' in fixed:
if not is_auto_addr:
# Remove the IP address from the allocation pool
IpamNonPluggableBackend._allocate_specific_ip(
context, fixed['subnet_id'], fixed['ip_address'])
ips.append({'ip_address': fixed['ip_address'],
'subnet_id': fixed['subnet_id']})
# Only subnet ID is specified => need to generate IP
# from subnet
else:
if is_auto_addr:
ip_address = self._calculate_ipv6_eui64_addr(context,
subnet,
mac_address)
ips.append({'ip_address': ip_address.format(),
'subnet_id': subnet['id']})
else:
subnets = [subnet]
# IP address allocation
result = self._generate_ip(context, subnets)
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
return ips
def _update_ips_for_port(self, context, network_id, original_ips,
new_ips, mac_address, device_owner):
"""Add or remove IPs from the port."""
added = []
changes = self._get_changed_ips_for_port(context, original_ips,
new_ips, device_owner)
# Check if the IP's to add are OK
to_add = self._test_fixed_ips_for_port(context, network_id,
changes.add, device_owner)
for ip in changes.remove:
LOG.debug("Port update. Hold %s", ip)
IpamNonPluggableBackend._delete_ip_allocation(context,
network_id,
ip['subnet_id'],
ip['ip_address'])
if to_add:
LOG.debug("Port update. Adding %s", to_add)
added = self._allocate_fixed_ips(context, to_add, mac_address)
return self.Changes(add=added,
original=changes.original,
remove=changes.remove)
def _allocate_ips_for_port(self, context, port):
"""Allocate IP addresses for the port.
If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
addresses for the port. If port['fixed_ips'] contains an IP address or
a subnet_id then allocate an IP address accordingly.
"""
p = port['port']
ips = []
v6_stateless = []
net_id_filter = {'network_id': [p['network_id']]}
subnets = self._get_subnets(context, filters=net_id_filter)
is_router_port = (
p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS or
p['device_owner'] == constants.DEVICE_OWNER_ROUTER_SNAT)
fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
if fixed_configured:
configured_ips = self._test_fixed_ips_for_port(context,
p["network_id"],
p['fixed_ips'],
p['device_owner'])
ips = self._allocate_fixed_ips(context,
configured_ips,
p['mac_address'])
# For ports that are not router ports, implicitly include all
# auto-address subnets for address association.
if not is_router_port:
v6_stateless += [subnet for subnet in subnets
if ipv6_utils.is_auto_address_subnet(subnet)]
else:
# Split into v4, v6 stateless and v6 stateful subnets
v4 = []
v6_stateful = []
for subnet in subnets:
if subnet['ip_version'] == 4:
v4.append(subnet)
elif ipv6_utils.is_auto_address_subnet(subnet):
if not is_router_port:
v6_stateless.append(subnet)
else:
v6_stateful.append(subnet)
version_subnets = [v4, v6_stateful]
for subnets in version_subnets:
if subnets:
result = IpamNonPluggableBackend._generate_ip(context,
subnets)
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
for subnet in v6_stateless:
# IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
# are implicitly included.
ip_address = self._calculate_ipv6_eui64_addr(context, subnet,
p['mac_address'])
ips.append({'ip_address': ip_address.format(),
'subnet_id': subnet['id']})
return ips
def add_auto_addrs_on_network_ports(self, context, subnet):
"""For an auto-address subnet, add addrs for ports on the net."""
with context.session.begin(subtransactions=True):
network_id = subnet['network_id']
port_qry = context.session.query(models_v2.Port)
for port in port_qry.filter(
and_(models_v2.Port.network_id == network_id,
models_v2.Port.device_owner !=
constants.DEVICE_OWNER_ROUTER_SNAT,
~models_v2.Port.device_owner.in_(
constants.ROUTER_INTERFACE_OWNERS))):
ip_address = self._calculate_ipv6_eui64_addr(
context, subnet, port['mac_address'])
allocated = models_v2.IPAllocation(network_id=network_id,
port_id=port['id'],
ip_address=ip_address,
subnet_id=subnet['id'])
try:
# Do the insertion of each IP allocation entry within
# the context of a nested transaction, so that the entry
# is rolled back independently of other entries whenever
# the corresponding port has been deleted.
with context.session.begin_nested():
context.session.add(allocated)
except db_exc.DBReferenceError:
LOG.debug("Port %s was deleted while updating it with an "
"IPv6 auto-address. Ignoring.", port['id'])
def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr):
prefix = subnet['cidr']
network_id = subnet['network_id']
ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
prefix, mac_addr).format()
if not self._check_unique_ip(context, network_id,
subnet['id'], ip_address):
raise n_exc.IpAddressInUse(net_id=network_id,
ip_address=ip_address)
return ip_address
def allocate_subnet(self, context, network, subnet, subnetpool_id):
subnetpool = None
if subnetpool_id:
subnetpool = self._get_subnetpool(context, subnetpool_id)
self._validate_ip_version_with_subnetpool(subnet, subnetpool)
# gateway_ip and allocation pools should be validated or generated
# only for specific request
if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED:
subnet['gateway_ip'] = self._gateway_ip_str(subnet,
subnet['cidr'])
# allocation_pools are converted to list of IPRanges
subnet['allocation_pools'] = self._prepare_allocation_pools(
subnet['allocation_pools'],
subnet['cidr'],
subnet['gateway_ip'])
subnet_request = ipam_req.SubnetRequestFactory.get_request(context,
subnet,
subnetpool)
if subnetpool_id:
driver = subnet_alloc.SubnetAllocator(subnetpool, context)
ipam_subnet = driver.allocate_subnet(subnet_request)
subnet_request = ipam_subnet.get_details()
subnet = self._save_subnet(context,
network,
self._make_subnet_args(
subnet_request,
subnet,
subnetpool_id),
subnet['dns_nameservers'],
subnet['host_routes'],
subnet_request)
return subnet
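# --- Illustrative sketch (editor's addition, not part of the original
# module): the pure range-splitting logic behind _allocate_specific_ip
# above, applied to plain (first_ip, last_ip) string tuples instead of
# IPAvailabilityRange rows. netaddr is already imported at module top.
def _split_availability_range_demo(first_ip, last_ip, ip_address):
    first = int(netaddr.IPAddress(first_ip))
    last = int(netaddr.IPAddress(last_ip))
    ip = int(netaddr.IPAddress(ip_address))
    if not first <= ip <= last:
        return [(first_ip, last_ip)]      # address not in this range
    if first == last:
        return []                         # range fully consumed
    if ip == first:
        return [(str(netaddr.IPAddress(ip + 1)), last_ip)]
    if ip == last:
        return [(first_ip, str(netaddr.IPAddress(ip - 1)))]
    # interior address: split into two ranges around it
    return [(first_ip, str(netaddr.IPAddress(ip - 1))),
            (str(netaddr.IPAddress(ip + 1)), last_ip)]

# _split_availability_range_demo('10.0.0.1', '10.0.0.254', '10.0.0.5')
# -> [('10.0.0.1', '10.0.0.4'), ('10.0.0.6', '10.0.0.254')]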
| eonpatapon/neutron | neutron/db/ipam_non_pluggable_backend.py | Python | apache-2.0 | 24,399 |
import web
import string
import random
import json
import datetime
@web.memoize
def get_db():
if 'database_url' in web.config:
return web.database(web.config.database_url)
else:
return web.database(**web.config.db_parameters)
class ResultSet:
"""Iterator wrapper over database result.
Provides utilities like first, list etc.
"""
def __init__(self, seq, model=web.storage):
self.model = model
self.seq = iter(seq)
def list(self):
return list(self)
def __iter__(self):
return self
def __next__(self):
return self.model(next(self.seq))
next = __next__
def first(self):
"""Returns the first element from the database result or None.
"""
try:
return next(self)
except StopIteration:
return None
class Model(web.storage):
"""Base model.
"""
@classmethod
def where(cls, **kw):
result = get_db().where(cls.TABLE, **kw)
return ResultSet(result, model=cls)
@classmethod
def find(cls, **kw):
"""Returns the first item matching the constraints specified as keywords.
"""
return cls.where(**kw).first()
@classmethod
def findall(cls, **kw):
"""Returns the first item matching the constraints specified as keywords.
"""
return cls.where(**kw).list()
@classmethod
def new(cls, **kw):
id = get_db().insert(cls.TABLE, **kw)
return cls.find(id=id)
def __hash__(self):
return self.id
def dict(self):
d = {}
rules = {
datetime.datetime: datetime.datetime.isoformat,
datetime.date: datetime.date.isoformat
}
for k, v in self.items():
if k.startswith("_"):
continue
for cls, converter in rules.items():
if isinstance(v, cls):
v = converter(v)
break
d[k] = v
return d
class User(Model):
TABLE = "users"
@classmethod
def new(cls, name, email, phone=None, **kw):
if 'username' not in kw:
kw['username'] = cls._suggest_username(email)
id = get_db().insert("users", name=name, email=email, phone=phone, **kw)
return cls.find(id=id)
@staticmethod
def _suggest_username(email):
"""suggests a username based on email.
"""
basename = email.split("@")[0]
username = basename
for i in range(1, 100):
if not User.find(username=username):
return username
username = "{}-{}".format(basename, i)
        # fall back to a random suffix if the above attempts fail
suffix = "".join(random.choice(string.lowercase) for i in range(4))
return "{}-{}".format(basename, suffix)
def update(self, **kw):
get_db().update("users", where='id=$id', vars=self, **kw)
dict.update(self, kw)
def make_trainer(self):
self.update(is_trainer=True)
def is_trainer(self):
return self['is_trainer']
def is_admin(self):
return self['is_admin']
@classmethod
def find_all_org_members(cls):
result = get_db().query(
"SELECT distinct(users.*) FROM users" +
" JOIN organization_members on user_id=users.id")
return [cls(row) for row in result]
class Organization(Model):
TABLE = "organization"
@classmethod
def new(cls, name, city):
id = get_db().insert("organization", name=name, city=city)
return cls.find(id=id)
def add_member(self, user, role):
get_db().insert("organization_members", org_id=self.id, user_id=user.id, role=role)
def get_workshops(self, status=None):
"""Returns list of workshops by this organiazation.
"""
wheres = {}
if status:
wheres['status'] = status
return Workshop.findall(org_id=self.id, order='date desc', **wheres)
def add_new_workshop(self, title, description,
expected_participants, date):
return Workshop.new(self, title, description,
expected_participants, date)
def is_admin(self, email):
"""Returns True of given email is an admin of this org.
"""
if not email:
return False
# Admin user is admin of every org
if web.config.get('admin_user') == email:
return True
admin = self.get_admin()
if admin and admin.email == email:
return True
return False
def is_member(self, user):
result = get_db().query(
"SELECT * FROM organization_members" +
" WHERE org_id=$self.id AND user_id=$user.id",
vars=locals())
return bool(result)
def can_update(self, user):
if not user:
return False
else:
return user.is_admin() or self.is_member(user)
def get_members(self):
result = get_db().query(
"SELECT users.*, role FROM users" +
" JOIN organization_members ON organization_members.user_id=users.id" +
" WHERE organization_members.org_id=$self.id", vars=locals())
def make_member(row):
role = row.pop('role')
member = User(row)
return member, role
return [make_member(row) for row in result]
class Workshop(Model):
TABLE = "workshop"
@classmethod
def new(cls, org, title, description, expected_participants, date):
id = get_db().insert("workshop",
org_id=org.id,
title=title,
description=description,
expected_participants=expected_participants,
date=date)
return cls.find(id=id)
def update(self, **kw):
get_db().update("workshop", where='id=$id', vars=self, **kw)
dict.update(self, kw)
def get_trainer(self):
return self.trainer_id and User.find(id=self.trainer_id)
def get_trainers(self):
"""Returns all the trainers conducting this workshop.
"""
trainers = set()
if self.trainer_id:
trainers.add(self.trainer_id)
result = get_db().where(
"workshop_trainers",
workshop_id=self.id,
status='confirmed')
trainers.update(row.trainer_id for row in result)
return [User.find(id=id) for id in trainers]
def set_trainer(self, trainer):
self.update(trainer_id=trainer.id, status='confirmed')
def confirm_trainer(self, trainer):
self.set_trainer(trainer)
def get_org(self):
return Organization.find(id=self.org_id)
def record_interest(self, trainer):
"""Record that the given trainer has shown interest to conduct
the this workshop.
"""
get_db().insert("workshop_trainers", workshop_id=self.id, trainer_id=trainer.id)
def cancel_interest(self, trainer):
"""Record that the given trainer has shown interest to conduct
the this workshop.
"""
get_db().delete("workshop_trainers",
where="workshop_id=$self.id AND trainer_id=$trainer.id",
vars=locals())
def get_interested_trainers(self):
db = get_db()
rows = db.where("workshop_trainers", workshop_id=self.id)
ids = [row.trainer_id for row in rows]
if ids:
rows = db.query("SELECT * FROM users WHERE id IN $ids", vars={"ids": ids})
return [User(row) for row in rows]
else:
return []
def is_interested_trainer(self, user):
rows = get_db().where("workshop_trainers",
workshop_id=self.id,
trainer_id=user.id).list()
return bool(rows)
def get_comments(self):
return Comment.findall(workshop_id=self.id, order='created')
def add_comment(self, user, comment):
return Comment.new(
workshop_id=self.id,
author_id=user.id,
comment=comment)
def get_followers(self):
followers = set()
# add org members
followers.update(m for m, role in self.get_org().get_members())
# add trainers
if self.status == 'pending':
followers.update(self.get_interested_trainers())
elif self.status == 'confirmed':
            followers.add(self.get_trainer())
# add commenters
followers.update(c.get_author() for c in self.get_comments())
return list(followers)
def dict(self):
d = dict(self)
d['date'] = self.date and self.date.isoformat()
return d
class Comment(Model):
TABLE = "comment"
def get_author(self):
return User.find(id=self.author_id)
def get_workshop(self):
return Workshop.find(id=self.workshop_id)
class Activity(Model):
TABLE = "activity"
@classmethod
def get_recent_activity(cls, limit=50, offset=0):
return cls.findall(limit=limit, offset=offset, order='tstamp desc')
@classmethod
def new(cls, type, user, info):
id = get_db().insert("activity",
type=type,
user_id=user and user.id,
user_name=user and user.name,
info=json.dumps(info))
return Activity.find(id=id)
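# --- Illustrative sketch (editor's addition): typical use of the model
# layer above. Field values are hypothetical, and this assumes web.config
# points at a reachable database.
def _models_demo():
    user = User.new(name="Alice", email="alice@example.com")
    org = Organization.new(name="Example Org", city="Bangalore")
    org.add_member(user, role="admin")
    workshop = org.add_new_workshop(
        title="Intro to Python",
        description="A one-day beginner workshop.",
        expected_participants=30,
        date=datetime.date.today())
    workshop.add_comment(user, "Looking forward to this!")
    return workshop.dict()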
| fsmk/fsmkschool | broadgauge/models.py | Python | bsd-3-clause | 9,468 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from nova.objects import keypair as keypair_obj
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit import fake_crypto
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
microversion = None
sample_dir = "keypairs"
expected_delete_status_code = 202
expected_post_status_code = 200
def _get_flags(self):
f = super(KeyPairsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
return f
def setUp(self):
super(KeyPairsSampleJsonTest, self).setUp()
self.api.microversion = self.microversion
# TODO(sdague): this is only needed because we randomly choose the
# uuid each time.
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self):
return self._check_keypairs_post()
def _check_keypairs_post(self, **kwargs):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, **kwargs)
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
subs = {'keypair_name': key_name}
self._verify_response('keypairs-post-resp', subs, response,
self.expected_post_status_code)
# NOTE(maurosr): return the key_name is necessary cause the
# verification returns the label of the last compared information in
# the response, not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(public_key)
def _check_keypairs_import_key_post(self, public_key, **kwargs):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
}
params = subs.copy()
params['public_key'] = public_key
params.update(**kwargs)
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
params)
self._verify_response('keypairs-import-post-resp', subs, response,
self.expected_post_status_code)
def test_keypairs_list(self):
# Get api sample of key pairs list request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = {'keypair_name': key_name}
self._verify_response('keypairs-list-resp', subs, response, 200)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs/%s' % key_name)
subs = {'keypair_name': key_name}
self._verify_response('keypairs-get-resp', subs, response, 200)
def test_keypairs_delete(self):
# Get api sample of key pairs delete request.
key_name = self.test_keypairs_post()
response = self._do_delete('os-keypairs/%s' % key_name)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest):
microversion = '2.2'
expected_post_status_code = 201
expected_delete_status_code = 204
# NOTE(gmann): microversion tests do not need to run for v2 API
# so defining scenarios only for v2.2 which will run the original tests
# by appending '(v2_2)' in test_id.
scenarios = [('v2_2', {'api_major_version': 'v2.1'})]
def test_keypairs_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, as it is used by other tests.
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
def test_keypairs_post_x509(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
def test_keypairs_post_invalid(self):
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, keypair_type='fakey_type')
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(400, response.status_code)
def test_keypairs_import_key_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, since the API sample expects a keypair_type.
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
def test_keypairs_import_key_post_x509(self):
public_key = fake_crypto.get_x509_cert_and_fingerprint()[0]
public_key = public_key.replace('\n', '\\n')
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
def _check_keypairs_import_key_post_invalid(self, keypair_type):
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
'keypair_type': keypair_type,
'public_key': fake_crypto.get_ssh_public_key()
}
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs)
self.assertEqual(400, response.status_code)
def test_keypairs_import_key_post_invalid_type(self):
self._check_keypairs_import_key_post_invalid(
keypair_type='fakey_type')
def test_keypairs_import_key_post_invalid_combination(self):
self._check_keypairs_import_key_post_invalid(
keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
class KeyPairsV210SampleJsonTest(KeyPairsSampleJsonTest):
ADMIN_API = True
microversion = '2.10'
expected_post_status_code = 201
expected_delete_status_code = 204
scenarios = [('v2_10', {'api_major_version': 'v2.1'})]
def test_keypair_create_for_user(self):
subs = {
'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
'public_key': fake_crypto.get_ssh_public_key(),
'user_id': "fake"
}
self._check_keypairs_post(**subs)
def test_keypairs_post(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="admin")
def test_keypairs_import_key_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, since the API sample expects a keypair_type.
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="fake")
def test_keypairs_delete_for_user(self):
# Delete a keypair on behalf of a user
subs = {
'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
'public_key': fake_crypto.get_ssh_public_key(),
'user_id': "fake"
}
key_name = self._check_keypairs_post(**subs)
response = self._do_delete('os-keypairs/%s?user_id=fake' % key_name)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
ADMIN_API = False
def test_keypairs_post(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="fake")
def test_keypairs_post_for_other_user(self):
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name,
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id='fake1')
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(403, response.status_code)
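# --- Illustrative sketch (editor's addition): the substitution step the
# sample harness performs. Names like 'keypairs-post-req' refer to JSON
# templates with %(...)s placeholders that are filled from `subs`; the
# template body below is a hypothetical simplification.
def _template_subs_demo():
    template = '{"keypair": {"name": "%(keypair_name)s"}}'
    subs = {'keypair_name': 'keypair-demo'}
    return template % subs  # the request body that would be posted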
| zhimin711/nova | nova/tests/functional/api_sample_tests/test_keypairs.py | Python | apache-2.0 | 8,913 |
"""Tests for polynomial module.
"""
from __future__ import division
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import *
def trim(x) :
return poly.polytrim(x, tol=1e-6)
T0 = [ 1]
T1 = [ 0, 1]
T2 = [-1, 0, 2]
T3 = [ 0, -3, 0, 4]
T4 = [ 1, 0, -8, 0, 8]
T5 = [ 0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [ 0, -7, 0, 56, 0, -112, 0, 64]
T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestConstants(TestCase) :
def test_polydomain(self) :
assert_equal(poly.polydomain, [-1, 1])
def test_polyzero(self) :
assert_equal(poly.polyzero, [0])
def test_polyone(self) :
assert_equal(poly.polyone, [1])
def test_polyx(self) :
assert_equal(poly.polyx, [0, 1])
class TestArithmetic(TestCase) :
def test_polyadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = poly.polyadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polysub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = poly.polysub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polymul(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += 1
res = poly.polymul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polydiv(self) :
# check zero division
assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])
# check scalar division
quo, rem = poly.polydiv([2],[2])
assert_equal((quo, rem), (1, 0))
quo, rem = poly.polydiv([2,2],[2])
assert_equal((quo, rem), ((1,1), 0))
# check rest.
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1,2]
cj = [0]*j + [1,2]
tgt = poly.polyadd(ci, cj)
quo, rem = poly.polydiv(tgt, ci)
res = poly.polyadd(poly.polymul(quo, ci), rem)
assert_equal(res, tgt, err_msg=msg)
def test_polyval(self) :
def f(x) :
return x*(x**2 - 1)
#check empty input
assert_equal(poly.polyval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1,1)
for i in range(5) :
tgt = x**i
res = poly.polyval(x, [0]*i + [1])
assert_almost_equal(res, tgt)
tgt = f(x)
res = poly.polyval(x, [0, -1, 0, 1])
assert_almost_equal(res, tgt)
#check that shape is preserved
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(poly.polyval(x, [1]).shape, dims)
assert_equal(poly.polyval(x, [1,0]).shape, dims)
assert_equal(poly.polyval(x, [1,0,0]).shape, dims)
class TestCalculus(TestCase) :
def test_polyint(self) :
# check exceptions
assert_raises(ValueError, poly.polyint, [0], .5)
assert_raises(ValueError, poly.polyint, [0], -1)
assert_raises(ValueError, poly.polyint, [0], 1, [0,0])
assert_raises(ValueError, poly.polyint, [0], 1, lbnd=[0,0])
assert_raises(ValueError, poly.polyint, [0], 1, scl=[0,0])
# check single integration with integration constant
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
res = poly.polyint(pol, m=1, k=[i])
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
assert_almost_equal(poly.polyval(-1, res), i)
# check single integration with integration constant and scaling
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
res = poly.polyint(pol, m=1, k=[i], scl=2)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1)
res = poly.polyint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k])
res = poly.polyint(pol, m=j, k=range(j))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
res = poly.polyint(pol, m=j, k=range(j), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
res = poly.polyint(pol, m=j, k=range(j), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_polyder(self) :
# check exceptions
assert_raises(ValueError, poly.polyder, [0], .5)
assert_raises(ValueError, poly.polyder, [0], -1)
        # check that the zeroth derivative does nothing
for i in range(5) :
tgt = [1] + [0]*i
res = poly.polyder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5) :
for j in range(2,5) :
tgt = [1] + [0]*i
res = poly.polyder(poly.polyint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5) :
for j in range(2,5) :
tgt = [1] + [0]*i
res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
class TestMisc(TestCase) :
def test_polyfromroots(self) :
res = poly.polyfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = Tlist[i]
res = poly.polyfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res),trim(tgt))
def test_polyroots(self) :
assert_almost_equal(poly.polyroots([1]), [])
assert_almost_equal(poly.polyroots([1, 2]), [-.5])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = poly.polyroots(poly.polyfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_polyvander(self) :
# check for 1d x
x = np.arange(3)
v = poly.polyvander(x, 3)
assert_(v.shape == (3,4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[...,i], poly.polyval(x, coef))
# check for 2d x
x = np.array([[1,2],[3,4],[5,6]])
v = poly.polyvander(x, 3)
assert_(v.shape == (3,2,4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[...,i], poly.polyval(x, coef))
def test_polyfit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, poly.polyfit, [1], [1], -1)
assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
assert_raises(TypeError, poly.polyfit, [], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1,1])
# Test fit
x = np.linspace(0,2)
y = f(x)
#
coef3 = poly.polyfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(poly.polyval(x, coef3), y)
#
coef4 = poly.polyfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(poly.polyval(x, coef4), y)
#
coef2d = poly.polyfit(x, np.array([y,y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
wcoef3 = poly.polyfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = poly.polyfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
def test_polytrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, poly.polytrim, coef, -1)
# Test results
assert_equal(poly.polytrim(coef), coef[:-1])
assert_equal(poly.polytrim(coef, 1), coef[:-3])
assert_equal(poly.polytrim(coef, 2), [0])
def test_polyline(self) :
assert_equal(poly.polyline(3,4), [3, 4])
class TestPolynomialClass(TestCase) :
p1 = poly.Polynomial([1,2,3])
p2 = poly.Polynomial([1,2,3], [0,1])
p3 = poly.Polynomial([1,2])
p4 = poly.Polynomial([2,2,3])
p5 = poly.Polynomial([3,2,3])
def test_equal(self) :
assert_(self.p1 == self.p1)
assert_(self.p2 == self.p2)
assert_(not self.p1 == self.p2)
assert_(not self.p1 == self.p3)
assert_(not self.p1 == [1,2,3])
def test_not_equal(self) :
assert_(not self.p1 != self.p1)
assert_(not self.p2 != self.p2)
assert_(self.p1 != self.p2)
assert_(self.p1 != self.p3)
assert_(self.p1 != [1,2,3])
def test_add(self) :
tgt = poly.Polynomial([2,4,6])
assert_(self.p1 + self.p1 == tgt)
assert_(self.p1 + [1,2,3] == tgt)
assert_([1,2,3] + self.p1 == tgt)
def test_sub(self) :
tgt = poly.Polynomial([1])
assert_(self.p4 - self.p1 == tgt)
assert_(self.p4 - [1,2,3] == tgt)
assert_([2,2,3] - self.p1 == tgt)
def test_mul(self) :
tgt = poly.Polynomial([1,4,10,12,9])
assert_(self.p1 * self.p1 == tgt)
assert_(self.p1 * [1,2,3] == tgt)
assert_([1,2,3] * self.p1 == tgt)
def test_floordiv(self) :
tgt = poly.Polynomial([1])
assert_(self.p4 // self.p1 == tgt)
assert_(self.p4 // [1,2,3] == tgt)
assert_([2,2,3] // self.p1 == tgt)
def test_mod(self) :
tgt = poly.Polynomial([1])
assert_((self.p4 % self.p1) == tgt)
assert_((self.p4 % [1,2,3]) == tgt)
assert_(([2,2,3] % self.p1) == tgt)
def test_divmod(self) :
tquo = poly.Polynomial([1])
trem = poly.Polynomial([2])
quo, rem = divmod(self.p5, self.p1)
assert_(quo == tquo and rem == trem)
quo, rem = divmod(self.p5, [1,2,3])
assert_(quo == tquo and rem == trem)
quo, rem = divmod([3,2,3], self.p1)
assert_(quo == tquo and rem == trem)
def test_pow(self) :
tgt = poly.Polynomial([1])
for i in range(5) :
res = self.p1**i
assert_(res == tgt)
tgt *= self.p1
def test_call(self) :
# domain = [-1, 1]
x = np.linspace(-1, 1)
tgt = (3*x + 2)*x + 1
assert_almost_equal(self.p1(x), tgt)
# domain = [0, 1]
x = np.linspace(0, 1)
xx = 2*x - 1
assert_almost_equal(self.p2(x), self.p1(xx))
def test_degree(self) :
assert_equal(self.p1.degree(), 2)
def test_trimdeg(self) :
assert_raises(ValueError, self.p1.cutdeg, .5)
assert_raises(ValueError, self.p1.cutdeg, -1)
assert_equal(len(self.p1.cutdeg(3)), 3)
assert_equal(len(self.p1.cutdeg(2)), 3)
assert_equal(len(self.p1.cutdeg(1)), 2)
assert_equal(len(self.p1.cutdeg(0)), 1)
def test_convert(self) :
x = np.linspace(-1,1)
p = self.p1.convert(domain=[0,1])
assert_almost_equal(p(x), self.p1(x))
def test_mapparms(self) :
parms = self.p2.mapparms()
assert_almost_equal(parms, [-1, 2])
def test_trim(self) :
coef = [1, 1e-6, 1e-12, 0]
p = poly.Polynomial(coef)
assert_equal(p.trim().coef, coef[:3])
assert_equal(p.trim(1e-10).coef, coef[:2])
assert_equal(p.trim(1e-5).coef, coef[:1])
def test_truncate(self) :
assert_raises(ValueError, self.p1.truncate, .5)
assert_raises(ValueError, self.p1.truncate, 0)
assert_equal(len(self.p1.truncate(4)), 3)
assert_equal(len(self.p1.truncate(3)), 3)
assert_equal(len(self.p1.truncate(2)), 2)
assert_equal(len(self.p1.truncate(1)), 1)
def test_copy(self) :
p = self.p1.copy()
assert_(self.p1 == p)
def test_integ(self) :
p = self.p2.integ()
assert_almost_equal(p.coef, poly.polyint([1,2,3], 1, 0, scl=.5))
p = self.p2.integ(lbnd=0)
assert_almost_equal(p(0), 0)
p = self.p2.integ(1, 1)
assert_almost_equal(p.coef, poly.polyint([1,2,3], 1, 1, scl=.5))
p = self.p2.integ(2, [1, 2])
assert_almost_equal(p.coef, poly.polyint([1,2,3], 2, [1, 2], scl=.5))
def test_deriv(self) :
p = self.p2.integ(2, [1, 2])
assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)
assert_almost_equal(p.deriv(2).coef, self.p2.coef)
def test_roots(self) :
p = poly.Polynomial([0, -1, 0, 1], [0, 1])
res = p.roots()
tgt = [0, .5, 1]
assert_almost_equal(res, tgt)
def test_linspace(self):
xdes = np.linspace(0, 1, 20)
ydes = self.p2(xdes)
xres, yres = self.p2.linspace(20)
assert_almost_equal(xres, xdes)
assert_almost_equal(yres, ydes)
def test_fromroots(self) :
roots = [0, .5, 1]
p = poly.Polynomial.fromroots(roots, domain=[0, 1])
res = p.coef
tgt = [0, -1, 0, 1]
assert_almost_equal(res, tgt)
def test_fit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
x = np.linspace(0,3)
y = f(x)
# test default value of domain
p = poly.Polynomial.fit(x, y, 3)
assert_almost_equal(p.domain, [0,3])
# test that fit works in given domains
p = poly.Polynomial.fit(x, y, 3, None)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, [0,3])
p = poly.Polynomial.fit(x, y, 3, [])
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, [-1, 1])
# test that fit accepts weights.
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
p = poly.Polynomial.fit(x, yw, 3, w=w)
assert_almost_equal(p(x), y)
def test_identity(self) :
x = np.linspace(0,3)
p = poly.Polynomial.identity()
assert_almost_equal(p(x), x)
p = poly.Polynomial.identity([1,3])
assert_almost_equal(p(x), x)
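# --- Illustrative sketch (editor's addition): the divmod round-trip
# identity exercised by TestArithmetic.test_polydiv, stated directly.
def _polydiv_roundtrip_demo():
    ci = [1, 2, 3]
    tgt = poly.polyadd(ci, [4, 5])
    quo, rem = poly.polydiv(tgt, ci)
    # quo*ci + rem reconstructs tgt (up to floating point)
    return poly.polyadd(poly.polymul(quo, ci), rem)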
| dagss/numpy_svn | numpy/polynomial/tests/test_polynomial.py | Python | bsd-3-clause | 16,380 |
import pytest
import torch
from sklearn import metrics
from torch.testing import assert_allclose
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp.training.metrics import Auc
class AucTest(AllenNlpTestCase):
@multi_device
def test_auc_computation(self, device: str):
auc = Auc()
all_predictions = []
all_labels = []
for _ in range(5):
predictions = torch.randn(8, device=device)
labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)
auc(predictions, labels)
all_predictions.append(predictions)
all_labels.append(labels)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
torch.cat(all_labels, dim=0).cpu().numpy(),
torch.cat(all_predictions, dim=0).cpu().numpy(),
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
        # One more computation to ensure reset works.
predictions = torch.randn(8, device=device)
labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)
auc(predictions, labels)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy()
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
@multi_device
def test_auc_gold_labels_behaviour(self, device: str):
# Check that it works with different pos_label
auc = Auc(positive_label=4)
predictions = torch.randn(8, device=device)
labels = torch.randint(3, 5, (8,), dtype=torch.long, device=device)
# We make sure that the positive label is always present.
labels[0] = 4
auc(predictions, labels)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
# Check that it errs on getting more than 2 labels.
with pytest.raises(ConfigurationError) as _:
labels = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10], device=device)
auc(predictions, labels)
@multi_device
def test_auc_with_mask(self, device: str):
auc = Auc()
predictions = torch.randn(8, device=device)
labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)
mask = torch.tensor([True, True, True, True, False, False, False, False], device=device)
auc(predictions, labels, mask)
computed_auc_value = auc.get_metric(reset=True)
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels[:4].cpu().numpy(), predictions[:4].cpu().numpy()
)
real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
assert_allclose(real_auc_value, computed_auc_value)
@multi_device
def test_auc_works_without_calling_metric_at_all(self, device: str):
auc = Auc()
auc.get_metric()
def test_distributed_auc(self):
predictions = torch.randn(8)
labels = torch.randint(3, 5, (8,), dtype=torch.long)
# We make sure that the positive label is always present.
labels[0] = 4
labels[4] = 4
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
)
predictions = [predictions[:4], predictions[4:]]
labels = [labels[:4], labels[4:]]
metric_kwargs = {"predictions": predictions, "gold_labels": labels}
desired_auc = metrics.auc(false_positive_rates, true_positive_rates)
run_distributed_test(
[-1, -1],
global_distributed_metric,
Auc(positive_label=4),
metric_kwargs,
desired_auc,
exact=False,
)
def test_distributed_auc_unequal_batches(self):
predictions = torch.randn(8)
labels = torch.randint(3, 5, (8,), dtype=torch.long)
# We make sure that the positive label is always present.
labels[0] = 4
labels[4] = 4
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
)
predictions = [predictions[:2], predictions[2:]]
labels = [labels[:2], labels[2:]]
metric_kwargs = {"predictions": predictions, "gold_labels": labels}
desired_auc = metrics.auc(false_positive_rates, true_positive_rates)
with pytest.raises(Exception) as _:
run_distributed_test(
[-1, -1],
global_distributed_metric,
Auc(positive_label=4),
metric_kwargs,
desired_auc,
exact=False,
)
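# --- Illustrative sketch (editor's addition): the equivalence these tests
# assert, in miniature -- accumulate one batch, then compare allennlp's
# Auc against sklearn's roc_curve + auc directly.
def _auc_equivalence_demo():
    predictions = torch.randn(16)
    labels = torch.randint(0, 2, (16,), dtype=torch.long)
    labels[0], labels[1] = 0, 1  # make sure both classes are present
    metric = Auc()
    metric(predictions, labels)
    fpr, tpr, _ = metrics.roc_curve(labels.numpy(), predictions.numpy())
    return metric.get_metric(reset=True), metrics.auc(fpr, tpr)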
| allenai/allennlp | tests/training/metrics/auc_test.py | Python | apache-2.0 | 5,421 |
# -*- coding: utf-8 -*-
"""
Bridge to the pandas library.
.. autosummary::
:toctree: _toctree/pandas_bridge
spiketrain_to_dataframe
event_to_dataframe
epoch_to_dataframe
multi_spiketrains_to_dataframe
multi_events_to_dataframe
multi_epochs_to_dataframe
slice_spiketrain
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import pandas as pd
import warnings
import quantities as pq
from elephant.neo_tools import (extract_neo_attributes, get_all_epochs,
get_all_events, get_all_spiketrains)
warnings.simplefilter('once', DeprecationWarning)
warnings.warn("pandas_bridge module will be removed in Elephant v0.8.x",
DeprecationWarning)
def _multiindex_from_dict(inds):
"""Given a dictionary, return a `pandas.MultiIndex`.
Parameters
----------
inds : dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
Returns
-------
pandas MultiIndex
"""
names, indexes = zip(*sorted(inds.items()))
return pd.MultiIndex.from_tuples([indexes], names=names)
def _sort_inds(obj, axis=0):
"""Put the indexes and index levels of a pandas object in sorted order.
    Parameters
    ----------
obj : pandas Series, DataFrame, Panel, or Panel4D
The object whose indexes should be sorted.
axis : int, list, optional, 'all'
The axis whose indexes should be sorted. Default is 0.
Can also be a list of indexes, in which case all of those axes
are sorted. If 'all', sort all indexes.
Returns
-------
pandas Series, DataFrame, Panel, or Panel4D
A copy of the object with indexes sorted.
Indexes are sorted in-place.
"""
if axis == 'all':
return _sort_inds(obj, axis=range(obj.ndim))
if hasattr(axis, '__iter__'):
for iax in axis:
obj = _sort_inds(obj, iax)
return obj
obj = obj.reorder_levels(sorted(obj.axes[axis].names), axis=axis)
return obj.sort_index(level=0, axis=axis, sort_remaining=True)
def _extract_neo_attrs_safe(obj, parents=True, child_first=True):
"""Given a neo object, return a dictionary of attributes and annotations.
This is done in a manner that is safe for `pandas` indexes.
Parameters
----------
obj : neo object
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
"""
res = extract_neo_attributes(obj, skip_array=True, skip_none=True,
parents=parents, child_first=child_first)
for key, value in res.items():
res[key] = _convert_value_safe(value)
key2 = _convert_value_safe(key)
if key2 is not key:
res[key2] = res.pop(key)
return res
def _convert_value_safe(value):
"""Convert `neo` values to a value compatible with `pandas`.
Some types and dtypes used with neo are not safe to use with pandas in some
or all situations.
    `quantities.Quantity` objects don't follow the normal Python rule that
    values that compare equal should have the same hash, making them
    fundamentally incompatible with `pandas`.
On python 3, `pandas` coerces `S` dtypes to bytes, which are not always
safe to use.
Parameters
----------
value : any
Value to convert (if it has any known issues).
Returns
-------
any
`value` or a version of value with potential problems fixed.
"""
if hasattr(value, 'dimensionality'):
return (value.magnitude.tolist(), str(value.dimensionality))
if hasattr(value, 'dtype') and value.dtype.kind == 'S':
return value.astype('U').tolist()
if hasattr(value, 'tolist'):
return value.tolist()
if hasattr(value, 'decode') and not hasattr(value, 'encode'):
return value.decode('UTF8')
return value
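# --- Illustrative sketch (editor's addition): what the conversions above
# produce for the problematic types they target.
def _convert_value_safe_demo():
    quantity = _convert_value_safe(pq.Quantity(3.0, 's'))
    # -> (3.0, 's'): magnitude plus unit string
    strings = _convert_value_safe(np.array([b'ab', b'cd']))
    # -> ['ab', 'cd']: byte strings become unicode
    return quantity, strings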
def spiketrain_to_dataframe(spiketrain, parents=True, child_first=True):
"""Convert a `neo.SpikeTrain` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the spike time converted to a `float` value in seconds.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is the spike number.
Parameters
----------
spiketrain : neo SpikeTrain
The SpikeTrain to convert.
parents : bool, optional
        Also include attributes and annotations from parent neo
        objects (if any).
    child_first : bool, optional
        If True (default True), values of child attributes are used
        over parent attributes in the event of a name conflict.
        If False, parent attributes are used.
        This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the spike times from `spiketrain`.
Notes
-----
The index name is `spike_number`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(spiketrain,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = spiketrain.magnitude
times = pq.Quantity(times, spiketrain.units).rescale('s').magnitude
times = times[np.newaxis].T
index = pd.Index(np.arange(len(spiketrain)), name='spike_number')
pdobj = pd.DataFrame(times, index=index, columns=columns)
return _sort_inds(pdobj, axis=1)
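# --- Illustrative sketch (editor's addition): minimal use of
# spiketrain_to_dataframe on a hand-built SpikeTrain (assumes neo is
# importable, as it is for this module's callers).
def _spiketrain_to_dataframe_demo():
    import neo
    st = neo.SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_stop=2.0 * pq.s,
                        name='demo')
    # one column of spike times in seconds; the MultiIndex header carries
    # scalar attributes such as name, t_start and t_stop
    return spiketrain_to_dataframe(st)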
def event_to_dataframe(event, parents=True, child_first=True):
"""Convert a `neo.core.Event` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the event label from the `event.label` attribute.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is the time stamp from the `event.times` attribute.
Parameters
----------
event : neo Event
The Event to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `event`.
Notes
-----
    If the lengths of `event.times` and `event.labels` are not the same,
    the longer will be truncated to the length of the shorter.
The index name is `times`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(event,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = event.times.rescale('s').magnitude
labels = event.labels.astype('U')
times = times[:len(labels)]
labels = labels[:len(times)]
index = pd.Index(times, name='times')
pdobj = pd.DataFrame(labels[np.newaxis].T, index=index, columns=columns)
return _sort_inds(pdobj, axis=1)
def epoch_to_dataframe(epoch, parents=True, child_first=True):
"""Convert a `neo.core.Epoch` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the epoch label from the `epoch.label` attribute.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is a `pandas.MultiIndex`, with the first index being the time stamp from
the `epoch.times` attribute and the second being the duration from
the `epoch.durations` attribute.
Parameters
----------
epoch : neo Epoch
The Epoch to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `epoch`.
Notes
-----
    If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
    not the same, the longer will be truncated to the length of the shortest.
The index names for `epoch.times` and `epoch.durations` are `times` and
`durations`, respectively.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(epoch,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = epoch.times.rescale('s').magnitude
durs = epoch.durations.rescale('s').magnitude
labels = epoch.labels.astype('U')
minlen = min([len(durs), len(times), len(labels)])
index = pd.MultiIndex.from_arrays([times[:minlen], durs[:minlen]],
names=['times', 'durations'])
pdobj = pd.DataFrame(labels[:minlen][np.newaxis].T,
index=index, columns=columns)
return _sort_inds(pdobj, axis='all')
def _multi_objs_to_dataframe(container, conv_func, get_func,
parents=True, child_first=True):
"""Convert one or more of a given `neo` object to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
the object, as well as any neo object that can hold the object.
Objects are searched recursively, so the objects can be nested (such as a
list of blocks).
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
object.
Parameters
----------
container : list, tuple, iterable, dict, neo container object
The container for the objects to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the converted objects.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
res = pd.concat([conv_func(obj, parents=parents, child_first=child_first)
for obj in get_func(container)], axis=1)
return _sort_inds(res, axis=1)
def multi_spiketrains_to_dataframe(container,
parents=True, child_first=True):
"""Convert one or more `neo.SpikeTrain` objects to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
spiketrains, as well as any neo object that can hold spiketrains:
`neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
Objects are searched recursively, so the objects can be nested (such as a
list of blocks).
The `pandas.DataFrame` object has one column for each spiketrain, with each
element being the spike time converted to a `float` value in seconds.
columns are padded to the same length with `NaN` values.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
spiketrain. The `index` is the spike number.
Parameters
----------
container : list, tuple, iterable, dict,
neo Block, neo Segment, neo Unit, neo ChannelIndex
The container for the spiketrains to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the spike times from `container`.
Notes
-----
The index name is `spike_number`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
return _multi_objs_to_dataframe(container,
spiketrain_to_dataframe,
get_all_spiketrains,
parents=parents, child_first=child_first)
def multi_events_to_dataframe(container, parents=True, child_first=True):
"""Convert one or more `neo.Event` objects to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
events, as well as any neo object that can hold events:
`neo.Block` and `neo.Segment`. Objects are searched recursively, so the
objects can be nested (such as a list of blocks).
The `pandas.DataFrame` object has one column for each event, with each
element being the event label. columns are padded to the same length with
`NaN` values.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
event. The `index` is the time stamp from the `event.times` attribute.
Parameters
----------
container : list, tuple, iterable, dict, neo Block, neo Segment
The container for the events to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `container`.
Notes
-----
    If the lengths of `event.times` and `event.labels` are not the same for
    any individual event, the longer will be truncated to the length of the
    shorter for that event. Between events, lengths can differ.
The index name is `times`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
return _multi_objs_to_dataframe(container,
event_to_dataframe, get_all_events,
parents=parents, child_first=child_first)
def multi_epochs_to_dataframe(container, parents=True, child_first=True):
"""Convert one or more `neo.Epoch` objects to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
epochs, as well as any neo object that can hold epochs:
`neo.Block` and `neo.Segment`. Objects are searched recursively, so the
objects can be nested (such as a list of blocks).
The `pandas.DataFrame` object has one column for each epoch, with each
element being the epoch label. columns are padded to the same length with
`NaN` values.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
epoch. The `index` is a `pandas.MultiIndex`, with the first index being
the time stamp from the `epoch.times` attribute and the second being the
duration from the `epoch.durations` attribute.
Parameters
----------
container : list, tuple, iterable, dict, neo Block, neo Segment
The container for the epochs to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `container`.
Notes
-----
    If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
    not the same for any individual epoch, the longer will be truncated to the
    length of the shorter for that epoch. Between epochs, lengths can differ.
The index level names for `epoch.times` and `epoch.durations` are
`times` and `durations`, respectively.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
    `quantities.Quantity` types are incompatible with `pandas`, so attributes
    and annotations of that type are converted to a tuple where the first
    element is the scalar value and the second is the string representation of
    the units.
"""
return _multi_objs_to_dataframe(container,
epoch_to_dataframe, get_all_epochs,
parents=parents, child_first=child_first)
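# Illustrative usage of the multi_*_to_dataframe family (a sketch; assumes a
# `neo.Block` instance named `blk` that already holds the relevant objects):
#
#     spikes_df = multi_spiketrains_to_dataframe(blk)
#     events_df = multi_events_to_dataframe(blk, parents=False)
#     epochs_df = multi_epochs_to_dataframe([blk])  # nested containers work too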
def slice_spiketrain(pdobj, t_start=None, t_stop=None):
"""Slice a `pandas.DataFrame`, changing indices appropriately.
Values outside the sliced range are converted to `NaN` values.
Slicing happens over columns.
This sets the `t_start` and `t_stop` column indexes to be the new values.
Otherwise it is the same as setting values outside the range to `NaN`.
Parameters
----------
pdobj : pandas DataFrame
The DataFrame to slice.
    t_start : float, optional.
        If specified, values in the returned DataFrame less than this are
        set to `NaN`.
        Default is `None` (do not use this argument).
    t_stop : float, optional.
        If specified, values in the returned DataFrame greater than this are
        set to `NaN`.
        Default is `None` (do not use this argument).
Returns
-------
    pdobj : pandas DataFrame
        The sliced DataFrame; the returned type is the same as the type of
        the input `pdobj`.
Notes
-----
The order of the index and/or column levels of the returned object may
differ from the order of the original.
    If `t_start` or `t_stop` is specified, all column indexes will be changed
    to the respective values, including those already within the new range.
If `t_start` or `t_stop` is not specified, those column indexes will not
be changed.
Returns a copy, even if `t_start` and `t_stop` are both `None`.
"""
if t_start is None and t_stop is None:
return pdobj.copy()
if t_stop is not None:
pdobj[pdobj > t_stop] = np.nan
pdobj = pdobj.T.reset_index(level='t_stop')
pdobj['t_stop'] = t_stop
pdobj = pdobj.set_index('t_stop', append=True).T
pdobj = _sort_inds(pdobj, axis=1)
if t_start is not None:
pdobj[pdobj < t_start] = np.nan
pdobj = pdobj.T.reset_index(level='t_start')
pdobj['t_start'] = t_start
pdobj = pdobj.set_index('t_start', append=True).T
pdobj = _sort_inds(pdobj, axis=1)
return pdobj
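# Illustrative sketch: given `df` from multi_spiketrains_to_dataframe(),
#
#     trimmed = slice_spiketrain(df, t_start=0.0, t_stop=10.0)
#
# sets values outside [0.0, 10.0] to NaN and rewrites the t_start/t_stop
# column index levels to the new bounds.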
| INM-6/elephant | elephant/pandas_bridge.py | Python | bsd-3-clause | 22,237 |
import csv
import json
import numpy as np
import pandas as pd
def read_delim(filepath):
    """
    Reads delimited file (auto-detects delimiter + header). Returns list.
    :param filepath: (str) location of delimited file
    :return: (list) list of records w/o header
    """
    with open(filepath, 'r') as f:
        dialect = csv.Sniffer().sniff(f.read(1024))
        f.seek(0)
        has_header = csv.Sniffer().has_header(f.read(1024))
        f.seek(0)
        reader = csv.reader(f, dialect)
        if has_header:
            next(reader)  # skip the header row
        ret = [line for line in reader]
    return ret
def read_delim_pd(filepath):
    """
    Reads delimited file (auto-detects delimiter + header). Returns pandas DataFrame.
    :param filepath: (str) location of delimited file
    :return: (DataFrame)
    """
    with open(filepath) as f:
        # header=0 means the first row is the header; None means no header
        has_header = 0 if csv.Sniffer().has_header(f.read(1024)) else None
        f.seek(0)
        return pd.read_csv(f, header=has_header, sep=None, engine='python')
def lookup(table, lookup_cols, lookup_vals, output_cols=None, output_recs=None):
    """
    Looks up records where lookup_cols == lookup_vals.
    Optionally returns only specified output_cols and/or specified output_recs.
    :param table: (DataFrame) the pandas DataFrame to use as a lookup table
    :param lookup_cols: (str | list) column name(s) to match on
    :param lookup_vals: (val | list) value(s) the lookup column(s) must equal
    :param output_cols: (str | list) optional column name(s) to keep in the output
    :param output_recs: (int | list) optional positional record(s) to keep in the output
    :return: (DataFrame) the matching records
    """
    if isinstance(lookup_cols, str):
        lookup_cols = [lookup_cols]
        lookup_vals = [lookup_vals]
    temp_df = pd.DataFrame(data=lookup_vals, columns=lookup_cols, copy=False)
    output = table.merge(temp_df, copy=False)
    if output_cols is not None:
        if isinstance(output_cols, str):
            output_cols = [output_cols]
        output = output[output_cols]
    if output_recs is not None:
        output = output.iloc[output_recs]
    return output
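# Illustrative sketch: given a position table `pt` from
# generate_position_table() below, fetch the x,y of well 'A01':
#
#     lookup(pt, 'name', 'A01', output_cols=['x', 'y'])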
def generate_position_table(num_rc, space_rc, offset=(0.0,0.0,0.0), to_clipboard=False):
"""
Generates a position table for a plate. Assumes that 'x' and 'c' are aligned and that
'y' and 'r' are aligned. These axes can be reflected by negating the corresponding 'space_rc';
translations can be applied via 'offset'. All entries are indexed by 'n' (newspaper order)
and 's' (serpentine order). Other columns may be added as needed, but Autosampler.goto()
requires 'x', 'y', and 'z' to function properly.
:param num_rc: (tup) number of rows and columns (num_rows, num_cols)
:param space_rc: (tup) spacing for rows and columns [mm] (spacing_rows, spacing_cols)
:param offset: (tup) 3-tuple of floats to be added to x,y,z [mm]
:param to_clipboard: (bool) whether to copy the position_table to the OS clipboard
:return: (DataFrame)
"""
# TODO: instead of offset, full affine option? can use negative space rc to reflect,
# but can't remap x -> y
temp = list()
headers = ['n', 's', 'r', 'c', 'name', 'x', 'y', 'z']
for r in range(num_rc[0]):
for c in range(num_rc[1]):
n = c + r * num_rc[1]
s = ((r + 1) % 2) * (c + r * num_rc[1]) + (r % 2) * ((r + 1) * num_rc[1] - (c + 1))
name = chr(64 + r + 1) + '{:02d}'.format(c + 1)
x = float(c * space_rc[1] + offset[0])
y = float(r * space_rc[0] + offset[1])
z = float(offset[2])
temp.append([n, s, r, c, name, x, y, z])
position_table = pd.DataFrame(temp, columns=headers)
if to_clipboard:
position_table.to_clipboard(index=False)
return position_table
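# Illustrative sketch: positions for a standard 96-well plate
# (8 rows x 12 columns, 9 mm pitch), using the default offset:
#
#     pos = generate_position_table((8, 12), (9.0, 9.0))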
def spacing(num_rc, p1, p2):
    """
    Computes the per-step spacing implied by two opposite corner positions.
    :param num_rc: (tup) number of rows and columns (num_rows, num_cols)
    :param p1: (tup) x,y of the first corner position [mm]
    :param p2: (tup) x,y of the opposite corner position [mm]
    :return: (tup) absolute spacing along (x, y) [mm]
    """
    r, c = map(float, num_rc)
    return tuple(abs(np.nan_to_num(np.subtract(p2, p1) / (c - 1, r - 1))))
def load_mm_positionlist(filepath):
"""
Takes a MicroManager position list and converts it to a pandas DataFrame. Will load z-coordinates if
available.
:param filepath: (str)
:return: (DataFrame) position list with headers = "r, c, name, x, y, [z]"
"""
with open(filepath) as f:
data = json.load(f)
df_rcn = pd.io.json.json_normalize(data, ['POSITIONS'])[['GRID_ROW', 'GRID_COL', 'LABEL']]
df_pos = pd.io.json.json_normalize(data, ['POSITIONS', 'DEVICES'])[['DEVICE', 'X', 'Y']]
    df_xy = df_pos.query("DEVICE=='XYStage'")[['X', 'Y']].reset_index(drop=True)
    df = pd.concat([df_rcn, df_xy], axis=1)
    # check for z-axis
    ds_z = df_pos.query("DEVICE=='ZStage'")['X'].reset_index(drop=True)
    if len(ds_z) > 0:
df['z'] = ds_z
rename = {'GRID_ROW': 'r',
'GRID_COL': 'c',
'LABEL': 'name',
'X': 'x',
'Y': 'y'}
df.rename(columns=rename, inplace=True)
return df
def generate_grid(c0, c1, l_img, p):
"""
Based on two points, creates a 2D-acquisition grid similar to what MicroManager would produce.
:param c0: (arr) first point; numpy 1d array of len 2
:param c1: (arr) second point; numpy 1d array of len 2
:param l_img: (float)
:param p: (float) desired percent overlap
:return: (DataFrame) position_list in the same format as load_mm_positionlist
"""
# TODO: does generate_grid subsume generate_position_table?
# n -> number of stage positions on an axis
n = 1 + np.ceil(np.abs(c1 - c0) / ((1 - p) * l_img)) # ct,ct
n = n.astype('int')
# l_acq = total_movement + l_img
# l_acq = l_img * (n - n*p + p) # um,um
sign = np.sign(c1 - c0)
# could also use cartesian product (itertools.product OR np.mgrid, stack)
# https://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
position_list = pd.DataFrame(columns=['r', 'c', 'name', 'x', 'y'], )
for j in xrange(n[1]): # iter y
y = sign[1] * j * l_img * (1 - p) + c0[1]
for i in xrange(n[0]) if not (j % 2) else reversed(xrange(n[0])): # iter x (serp)
x = sign[0] * i * l_img * (1 - p) + c0[0]
r = j
c = i
name = '1-Pos_{:03}_{:03}'.format(c, r)
position_list.loc[len(position_list)] = [r, c, name, x, y]
position_list[['r', 'c']] = position_list[['r', 'c']].astype(int)
return position_list
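# Illustrative sketch: a serpentine acquisition grid between two corner
# coordinates, with a 500 um field of view and 10% overlap:
#
#     grid = generate_grid(np.array([0.0, 0.0]), np.array([2000.0, 1500.0]),
#                          l_img=500.0, p=0.1)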
| FordyceLab/AcqPack | acqpack/utils.py | Python | mit | 6,206 |
#
# ElementTree
# $Id: ElementInclude.py 3225 2007-08-27 21:32:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Limited XInclude support for the ElementTree package.
##
import ElementTree
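##
# Internal helper: returns a shallow copy of the given element, including
# tag, attributes, text, tail, and children.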
def copy(elem):
e = ElementTree.Element(elem.tag, elem.attrib)
e.text = elem.text
e.tail = elem.tail
e[:] = elem
return e
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
file = open(href)
if parse == "xml":
data = ElementTree.parse(file).getroot()
else:
data = file.read()
if encoding:
data = data.decode(encoding)
file.close()
return data
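##
# Example of a custom loader implementing the interface described above
# (an illustrative sketch, not part of the original module): restrict
# includes to a single directory and delegate to default_loader.
#
#     import os
#     def restricted_loader(href, parse, encoding=None):
#         if os.path.dirname(os.path.abspath(href)) != os.path.abspath("includes"):
#             raise IOError("refusing include outside ./includes: %r" % href)
#         return default_loader(href, parse, encoding)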
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
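##
# Typical usage (illustrative sketch):
#
#     tree = ElementTree.parse("document.xml")
#     include(tree.getroot())
#     tree.write("expanded.xml")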
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.module.elementtree/lib/elementtree/ElementInclude.py | Python | gpl-2.0 | 5,051 |
import numpy as np
import matplotlib.pyplot as plt
plt.figure(1)
# ax1=plt.subplot(211)
x = np.linspace(-60,0,50)
y = 2/(1+np.exp((-x-15)/10))
for i in xrange(1,50):
plt.figure(1)
plt.plot(x,y)
plt.show() | xinghalo/DMInAction | src/test/07/time4.py | Python | apache-2.0 | 207 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
There are three use cases for the ConfigFilter class:
1. Help enforce that a given module does not access options registered
by another module, without first declaring those cross-module
dependencies using import_opt().
2. Prevent private configuration opts from being visible to modules
other than the one which registered it.
3. Limit the options on a Cfg object that can be accessed.
Cross-Module Option Dependencies
--------------------------------
When using the global cfg.CONF object, it is quite common for a module
to require the existence of configuration options registered by other
modules.
For example, if module 'foo' registers the 'blaa' option and the module
'bar' uses the 'blaa' option then 'bar' might do::
import foo
print(CONF.blaa)
However, it's completely non-obvious why foo is being imported (is it
unused, can we remove the import) and where the 'blaa' option comes from.
The CONF.import_opt() method allows such a dependency to be explicitly
declared::
CONF.import_opt('blaa', 'foo')
print(CONF.blaa)
However, import_opt() has a weakness - if 'bar' imports 'foo' using the
import builtin and doesn't use import_opt() to import 'blaa', then 'blaa'
can still be used without problems. Similarly, where multiple options
are registered by a module imported via import_opt(), a lazy programmer can
get away with only declaring a dependency on a single option.
The ConfigFilter class provides a way to ensure that options are not
available unless they have been registered in the module or imported using
import_opt() for example with::
CONF = ConfigFilter(cfg.CONF)
CONF.import_opt('blaa', 'foo')
print(CONF.blaa)
no other options other than 'blaa' are available via CONF.
Private Configuration Options
-----------------------------
Libraries which register configuration options typically do not want
users of the library API to access those configuration options. If
API users do access private configuration options, those users will
be disrupted if and when a configuration option is renamed. In other
words, one does not typically wish for the name of the private config
options to be part of the public API.
The ConfigFilter class provides a way for a library to register
options such that they are not visible via the ConfigOpts instance
which the API user supplies to the library. For example::
from __future__ import print_function
from oslo_config.cfg import *
from oslo_config.cfgfilter import *
class Widget(object):
def __init__(self, conf):
self.conf = conf
self._private_conf = ConfigFilter(self.conf)
self._private_conf.register_opt(StrOpt('foo'))
@property
def foo(self):
return self._private_conf.foo
conf = ConfigOpts()
widget = Widget(conf)
print(widget.foo)
print(conf.foo) # raises NoSuchOptError
Limited Configuration Options
-----------------------------
It may be required that when passing a CONF object to other functions we want
to filter that the receiving code is only able to access a restricted subset
of the options that are available on the CONF object. This is essentially a
more general case of the Private Configuration Options and Cross-Module Options
whereby we expose an option that is already present on the underlying CONF
object without providing any means to load it if not present.
So given a CONF object with options defined::
CONF.register_opt(StrOpt('foo'))
CONF.register_opt(StrOpt('bar'))
we can expose options such that only those options are present::
restricted_conf = CfgFilter(CONF)
restricted_conf.expose_opt('foo')
print(restricted_conf.foo)
print(restricted_conf.bar) # raises NoSuchOptError
"""
import collections
import itertools
from oslo_config import cfg
class ConfigFilter(collections.Mapping):
"""A helper class which wraps a ConfigOpts object.
ConfigFilter enforces the explicit declaration of dependencies on external
options and allows private options which are not registered with the
    wrapped ConfigOpts object.
"""
def __init__(self, conf):
"""Construct a ConfigFilter object.
:param conf: a ConfigOpts object
"""
self._conf = conf
self._fconf = cfg.ConfigOpts()
self._sync()
self._imported_opts = set()
self._imported_groups = dict()
def _sync(self):
if self._fconf._namespace is not self._conf._namespace:
self._fconf.clear()
self._fconf._namespace = self._conf._namespace
self._fconf._args = self._conf._args
def __getattr__(self, name):
"""Look up an option value.
:param name: the opt name (or 'dest', more precisely)
        :returns: the option value (after string substitution) or a GroupAttr
        :raises: NoSuchOptError, ConfigFileValueError, TemplateSubstitutionError
"""
if name in self._imported_groups:
return self._imported_groups[name]
elif name in self._imported_opts:
return getattr(self._conf, name)
else:
self._sync()
return getattr(self._fconf, name)
def __getitem__(self, key):
"""Look up an option value."""
return getattr(self, key)
def __contains__(self, key):
"""Return True if key is the name of a registered opt or group."""
return (key in self._fconf or
key in self._imported_opts or
key in self._imported_groups)
def __iter__(self):
"""Iterate over all registered opt and group names."""
return itertools.chain(self._fconf.keys(),
self._imported_opts,
self._imported_groups.keys())
def __len__(self):
"""Return the number of options and option groups."""
return (len(self._fconf) +
len(self._imported_opts) +
len(self._imported_groups))
@staticmethod
def _already_registered(conf, opt, group=None):
group_name = group.name if isinstance(group, cfg.OptGroup) else group
return ((group_name is None and
opt.dest in conf) or
(group_name is not None and
group_name in conf and
opt.dest in conf[group_name]))
def register_opt(self, opt, group=None):
"""Register an option schema.
:param opt: an instance of an Opt sub-class
:param group: an optional OptGroup object or group name
:return: False if the opt was already registered, True otherwise
:raises: DuplicateOptError
"""
if self._already_registered(self._conf, opt, group):
# Raises DuplicateError if there is another opt with the same name
ret = self._conf.register_opt(opt, group)
self._import_opt(opt.dest, group)
return ret
else:
return self._fconf.register_opt(opt, group)
def register_opts(self, opts, group=None):
"""Register multiple option schemas at once."""
for opt in opts:
self.register_opt(opt, group)
def register_cli_opt(self, opt, group=None):
"""Register a CLI option schema.
:param opt: an instance of an Opt sub-class
:param group: an optional OptGroup object or group name
        :return: False if the opt was already registered, True otherwise
:raises: DuplicateOptError, ArgsAlreadyParsedError
"""
if self._already_registered(self._conf, opt, group):
# Raises DuplicateError if there is another opt with the same name
ret = self._conf.register_cli_opt(opt, group)
self._import_opt(opt.dest, group)
return ret
else:
return self._fconf.register_cli_opt(opt, group)
def register_cli_opts(self, opts, group=None):
"""Register multiple CLI option schemas at once."""
for opt in opts:
self.register_cli_opt(opt, group)
def register_group(self, group):
"""Register an option group.
:param group: an OptGroup object
"""
self._fconf.register_group(group)
def import_opt(self, opt_name, module_str, group=None):
"""Import an option definition from a module.
        :param opt_name: the name/dest of the opt
:param module_str: the name of a module to import
:param group: an option OptGroup object or group name
:raises: NoSuchOptError, NoSuchGroupError
"""
self._conf.import_opt(opt_name, module_str, group)
self._import_opt(opt_name, group)
def import_group(self, group, module_str):
"""Import an option group from a module.
Note that this allows access to all options registered with
the group whether or not those options were registered by
the given module.
:param group: an option OptGroup object or group name
:param module_str: the name of a module to import
:raises: ImportError, NoSuchGroupError
"""
self._conf.import_group(group, module_str)
group = self._import_group(group)
group._all_opts = True
def _import_opt(self, opt_name, group):
if group is None:
self._imported_opts.add(opt_name)
return True
else:
group = self._import_group(group)
return group._import_opt(opt_name)
def _import_group(self, group_or_name):
if isinstance(group_or_name, cfg.OptGroup):
group_name = group_or_name.name
else:
group_name = group_or_name
if group_name in self._imported_groups:
return self._imported_groups[group_name]
else:
group = self.GroupAttr(self._conf, group_name)
self._imported_groups[group_name] = group
return group
def expose_opt(self, opt_name, group=None):
"""Expose an option from the underlying conf object.
This allows an object that has already been imported or used from the
base conf object to be seen from the filter object.
:param opt_name: the name/dest of the opt
:param group: an option OptGroup object or group name
"""
self._import_opt(opt_name, group)
def expose_group(self, group):
"""Expose all option from a group in the underlying conf object.
This allows an object that has already been imported or used from the
base conf object to be seen from the filter object.
:param group: an option OptGroup object or group name
"""
group = self._import_group(group)
group._all_opts = True
class GroupAttr(collections.Mapping):
"""Helper class to wrap a group object.
Represents the option values of a group as a mapping and attributes.
"""
def __init__(self, conf, group):
"""Construct a GroupAttr object.
:param conf: a ConfigOpts object
:param group: an OptGroup object
"""
self._conf = conf
self._group = group
self._imported_opts = set()
self._all_opts = False
def __getattr__(self, name):
"""Look up an option value."""
if not self._all_opts and name not in self._imported_opts:
raise cfg.NoSuchOptError(name)
return getattr(self._conf[self._group], name)
def __getitem__(self, key):
"""Look up an option value."""
return getattr(self, key)
def __contains__(self, key):
"""Return True if key is the name of a registered opt or group."""
return key in self._imported_opts
def __iter__(self):
"""Iterate over all registered opt and group names."""
for key in self._imported_opts:
yield key
def __len__(self):
"""Return the number of options and option groups."""
return len(self._imported_opts)
def _import_opt(self, opt_name):
self._imported_opts.add(opt_name)
| JioCloud/oslo.config | oslo_config/cfgfilter.py | Python | apache-2.0 | 12,758 |
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| django-debug-toolbar/django-debug-toolbar | example/manage.py | Python | bsd-3-clause | 314 |
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from scipy import signal
from scipy.stats import uniform, norm
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sima import spikes
#########
# PART 1: Make model calcium data
#########
# Data parameters
RATE = 1 # mean firing rate of poisson spike train (Hz)
STEPS = 5000 # number of time steps in data
TAU = 0.6 # time constant of calcium indicator (seconds)
DELTAT = 1 / 30 # time step duration (seconds)
SIGMA = 0.1 # standard deviation of gaussian noise
SEED = 2222 # random number generator seed
NTRACE = 5 # number of data traces to generate
# Make a poisson spike trains
SPIKES = [spikes.get_poisson_spikes(deltat=DELTAT, rate=RATE,
steps=STEPS, seed=SEED + i)
for i in range(NTRACE)]
SPIKES = np.asarray(SPIKES)
# Convolve with kernel to make calcium signal
np.random.seed(SEED)
GAMMA = 1 - (DELTAT / TAU)
CALCIUM = signal.lfilter([1], [1, -GAMMA], SPIKES)
TIME = np.linspace(0, STEPS * DELTAT, STEPS)
# Make fluorescence traces with random gaussian noise and baseline
FLUORS = [CALCIUM[i, ] + norm.rvs(scale=SIGMA, size=STEPS) + uniform.rvs()
for i in range(NTRACE)]
FLUORS = np.asarray(FLUORS)
#########
# PART 2: Estimate model parameters and perform spike inference
#########
# Perform spike inference on all simulated fluorescence traces
INFERENCE = np.zeros([STEPS, NTRACE])
FITS = np.zeros([STEPS, NTRACE])
# Jointly estimate gamma on traces concatenated together
[joint_gamma_est, joint_sigma_est] = spikes.estimate_parameters(
FLUORS.reshape(FLUORS.size), mode="correct")
for x in range(NTRACE):
# Estimate noise and decay parameters
[gamma_est, sigma_est] = spikes.estimate_parameters(
FLUORS[x, ], mode="correct", gamma=joint_gamma_est)
print("tau = {tau}, sigma = {sigma}".format(
tau=DELTAT / (1 - gamma_est), sigma=sigma_est))
# Run spike inference
INFERENCE[:, x], FITS[:, x], params = spikes.spike_inference(
FLUORS[x, ], sigma=sigma_est, gamma=joint_gamma_est, verbose=True)
#########
# PART 3: Plot results
#########
# Close all open figures
plt.close("all")
# Set up plotting style
sns.set(context="talk", rc={"figure.figsize": [20, 6]}, style="white")
sns.set_palette("muted", desat=.6)
tck = [0, .5, 1]
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, facecolor='w')
# Which cell to plot in first figure
cp = 0
# Plot the simulated data and model fit from the first result
plt.axes(ax1)
sns.tsplot(FLUORS[cp, ], ax=ax1, time=TIME)
sns.tsplot(FITS[:, cp], ax=ax1, time=TIME, color="red")
ax1.set_ylabel("Data and Fit")
plt.yticks(
np.round([FLUORS[cp].min(), FLUORS[cp].mean(), FLUORS[cp].max()], 1))
# Plot the true spike train
plt.axes(ax2)
plt.bar(TIME, SPIKES[cp, ], color="DimGray", width=DELTAT)
ax2.set_ylabel("True Spikes")
plt.yticks(tck)
plt.ylim(-.1, 1.1)
# Get true positives and false positives
spike_cutoff = 0.1
# matplotlib.mlab.find was removed from modern matplotlib; np.flatnonzero is
# the drop-in replacement
i_times = np.flatnonzero(INFERENCE[:, cp] > spike_cutoff)  # inferred spikes
t_times = np.flatnonzero(SPIKES[cp, :])  # true spikes
sInds = np.intersect1d(i_times, t_times) # indices of true positives
wInds = np.setdiff1d(i_times, t_times) # indices of false positives
tp = float(sInds.size) / float(i_times.size) # true positive rate
fp = float(wInds.size) / \
(STEPS - float(t_times.size)) # false positive rate
# Plot the spike inference
plt.axes(ax3)
plt.bar(
TIME[sInds], np.ones(sInds.size),
color="LightGrey", edgecolor="LightGrey", width=DELTAT)
plt.bar(
TIME[wInds], np.ones(wInds.size),
color="Red", edgecolor="Red", width=DELTAT)
plt.bar(
TIME, INFERENCE[:, 0] / INFERENCE[:, 0].max(),
color="DimGray", edgecolor="DimGray", width=DELTAT)
ax3.set_xlabel("Time (Seconds)")
ax3.set_ylabel("Spike Inference")
sns.despine(bottom=True, left=True)
plt.yticks(tck)
plt.ylim(-.1, 1.1)
plt.title(
"TP rate = " + str(round(tp, 2)) + "; FP rate = " + str(round(fp, 2)))
# Plot all traces and inference
plt.figure(5, facecolor='w')
plt.subplot(211)
plt.imshow(FLUORS, aspect="auto", interpolation="none")
plt.colorbar()
plt.subplot(212)
plt.imshow(INFERENCE.transpose(), aspect="auto", interpolation="none")
plt.colorbar()
plt.show()
| jzaremba/sima | examples/spikeinference.py | Python | gpl-2.0 | 4,351 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.db.sqlalchemy import models
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "os-agents"
def _get_flags(self):
f = super(AgentsJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.agents.Agents')
return f
def setUp(self):
super(AgentsJsonTest, self).setUp()
fake_agents_list = [{'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
'version': '8.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1}]
def fake_agent_build_create(context, values):
values['id'] = 1
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
return agent_build_ref
def fake_agent_build_get_all(context, hypervisor):
agent_build_all = []
for agent in fake_agents_list:
if hypervisor and hypervisor != agent['hypervisor']:
continue
agent_build_ref = models.AgentBuild()
agent_build_ref.update(agent)
agent_build_all.append(agent_build_ref)
return agent_build_all
def fake_agent_build_update(context, agent_build_id, values):
pass
def fake_agent_build_destroy(context, agent_update_id):
pass
self.stub_out("nova.db.agent_build_create", fake_agent_build_create)
self.stub_out("nova.db.agent_build_get_all", fake_agent_build_get_all)
self.stub_out("nova.db.agent_build_update", fake_agent_build_update)
self.stub_out("nova.db.agent_build_destroy", fake_agent_build_destroy)
def test_agent_create(self):
# Creates a new agent build.
project = {'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
'version': '8.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545'
}
response = self._do_post('os-agents', 'agent-post-req',
project)
self._verify_response('agent-post-resp', project, response, 200)
def test_agent_list(self):
# Return a list of all agent builds.
response = self._do_get('os-agents')
self._verify_response('agents-get-resp', {}, response, 200)
def test_agent_update(self):
# Update an existing agent build.
agent_id = 1
subs = {'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}
response = self._do_put('os-agents/%s' % agent_id,
'agent-update-put-req', subs)
self._verify_response('agent-update-put-resp', subs, response, 200)
def test_agent_delete(self):
# Deletes an existing agent build.
agent_id = 1
response = self._do_delete('os-agents/%s' % agent_id)
self.assertEqual(200, response.status_code)
| NeCTAR-RC/nova | nova/tests/functional/api_sample_tests/test_agents.py | Python | apache-2.0 | 4,255 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Satpy developers
#
# This file is part of Satpy.
#
# Satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Satpy. If not, see <http://www.gnu.org/licenses/>.
"""Module for testing the satpy.readers.tropomi_l2 module."""
import os
from datetime import datetime
from unittest import mock
import numpy as np
import pytest
import xarray as xr
METOP_FILE = "IMG_SX.M2.D17037.S1601.E1607.B0000001.WE.HR.ORB.nc"
NPP_MIRS_L2_SWATH = "NPR-MIRS-IMG_v11r6_npp_s201702061601000_e201702061607000_c202012201658410.nc"
N20_MIRS_L2_SWATH = "NPR-MIRS-IMG_v11r4_n20_s201702061601000_e201702061607000_c202012201658410.nc"
OTHER_MIRS_L2_SWATH = "NPR-MIRS-IMG_v11r4_gpm_s201702061601000_e201702061607000_c202010080001310.nc"
EXAMPLE_FILES = [METOP_FILE, NPP_MIRS_L2_SWATH, OTHER_MIRS_L2_SWATH]
N_CHANNEL = 22
N_FOV = 96
N_SCANLINE = 100
DEFAULT_FILE_DTYPE = np.float64
DEFAULT_2D_SHAPE = (N_SCANLINE, N_FOV)
DEFAULT_DATE = datetime(2019, 6, 19, 13, 0)
DEFAULT_LAT = np.linspace(23.09356, 36.42844, N_SCANLINE * N_FOV,
dtype=DEFAULT_FILE_DTYPE)
DEFAULT_LON = np.linspace(127.6879, 144.5284, N_SCANLINE * N_FOV,
dtype=DEFAULT_FILE_DTYPE)
FREQ = xr.DataArray([23.8, 31.4, 50.3, 51.76, 52.8, 53.596, 54.4, 54.94, 55.5,
57.29, 57.29, 57.29, 57.29, 57.29, 57.29, 88.2, 165.5,
183.31, 183.31, 183.31, 183.31, 183.31][:N_CHANNEL],
dims='Channel',
attrs={'description': "Central Frequencies (GHz)"})
POLO = xr.DataArray([2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3,
3, 3, 3][:N_CHANNEL], dims='Channel',
attrs={'description': "Polarizations"})
DS_IDS = ['RR', 'longitude', 'latitude']
TEST_VARS = ['btemp_88v', 'btemp_165h',
'btemp_23v', 'RR', 'Sfc_type']
DEFAULT_UNITS = {'btemp_88v': 'K', 'btemp_165h': 'K',
'btemp_23v': 'K', 'RR': 'mm/hr', 'Sfc_type': "1"}
PLATFORM = {"M2": "metop-a", "NPP": "npp", "GPM": "gpm"}
SENSOR = {"m2": "amsu-mhs", "npp": "atms", "gpm": "GPI"}
START_TIME = datetime(2017, 2, 6, 16, 1, 0)
END_TIME = datetime(2017, 2, 6, 16, 7, 0)
def fake_coeff_from_fn(fn):
"""Create Fake Coefficients."""
ameans = np.random.uniform(261, 267, N_CHANNEL)
locations = [
[1, 2],
[1, 2],
[3, 4, 5],
[3, 4, 5],
[4, 5, 6],
[5, 6, 7],
[6, 7, 8],
[7, 8],
[9, 10, 11],
[10, 11],
[10, 11, 12],
[11, 12, 13],
[12, 13],
[12, 13, 14],
[14, 15],
[1, 16],
[17, 18],
[18, 19],
[18, 19, 20],
[19, 20, 21],
[20, 21, 22],
[21, 22],
]
all_nchx = [len(loc) for loc in locations]
coeff_str = []
for idx in range(1, N_CHANNEL + 1):
nx = idx - 1
coeff_str.append('\n')
next_line = ' {} {} {}\n'.format(idx, all_nchx[nx], ameans[nx])
coeff_str.append(next_line)
next_line = ' {}\n'.format(" ".join([str(x) for x in locations[idx - 1]]))
coeff_str.append(next_line)
        for fov in range(1, N_FOV+1):
            # ones/zeros give simple, predictable coefficients and means
            random_coeff = np.ones(all_nchx[nx])
            str_coeff = ' '.join([str(x) for x in random_coeff])
            random_means = np.zeros(all_nchx[nx])
str_means = ' '.join([str(x) for x in random_means])
error_val = np.random.uniform(0, 4)
coeffs_line = ' {:>2} {:>2} {} {} {}\n'.format(idx, fov,
str_coeff,
str_means,
error_val)
coeff_str.append(coeffs_line)
return coeff_str
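# Layout of the fake coefficient text produced above (inferred from this
# generator, not from a real MiRS coefficient file): for each channel, a
# blank line, a "<chan> <n_chx> <amean>" line, a line of contributing
# channel indices, then one "<chan> <fov> <coeffs...> <means...> <error>"
# line per field of view.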
def _get_datasets_with_attributes(**kwargs):
"""Represent files with two resolution of variables in them (ex. OCEAN)."""
bt = xr.DataArray(np.linspace(1830, 3930, N_SCANLINE * N_FOV * N_CHANNEL).
reshape(N_SCANLINE, N_FOV, N_CHANNEL),
attrs={'long_name': "Channel Temperature (K)",
'units': "Kelvin",
'coordinates': "Longitude Latitude Freq",
'scale_factor': 0.01,
'_FillValue': -999,
'valid_range': [0, 50000]},
dims=('Scanline', 'Field_of_view', 'Channel'))
rr = xr.DataArray(np.random.randint(100, 500, size=(N_SCANLINE, N_FOV)),
attrs={'long_name': "Rain Rate (mm/hr)",
'units': "mm/hr",
'coordinates': "Longitude Latitude",
'scale_factor': 0.1,
'_FillValue': -999,
'valid_range': [0, 1000]},
dims=('Scanline', 'Field_of_view'))
sfc_type = xr.DataArray(np.random.randint(0, 4, size=(N_SCANLINE, N_FOV)),
attrs={'description': "type of surface:0-ocean," +
"1-sea ice,2-land,3-snow",
'units': "1",
'coordinates': "Longitude Latitude",
'_FillValue': -999,
'valid_range': [0, 3]
},
dims=('Scanline', 'Field_of_view'))
latitude = xr.DataArray(DEFAULT_LAT.reshape(DEFAULT_2D_SHAPE),
attrs={'long_name':
"Latitude of the view (-90,90)"},
dims=('Scanline', 'Field_of_view'))
longitude = xr.DataArray(DEFAULT_LON.reshape(DEFAULT_2D_SHAPE),
attrs={'long_name':
"Longitude of the view (-180,180)"},
dims=('Scanline', 'Field_of_view'))
ds_vars = {
'Freq': FREQ,
'Polo': POLO,
'BT': bt,
'RR': rr,
'Sfc_type': sfc_type,
'Latitude': latitude,
'Longitude': longitude
}
attrs = {'missing_value': -999.}
ds = xr.Dataset(ds_vars, attrs=attrs)
ds = ds.assign_coords({"Freq": FREQ, "Latitude": latitude, "Longitude": longitude})
return ds
def _get_datasets_with_less_attributes():
"""Represent files with two resolution of variables in them (ex. OCEAN)."""
bt = xr.DataArray(np.linspace(1830, 3930, N_SCANLINE * N_FOV * N_CHANNEL).
reshape(N_SCANLINE, N_FOV, N_CHANNEL),
attrs={'long_name': "Channel Temperature (K)",
'scale_factor': 0.01},
dims=('Scanline', 'Field_of_view', 'Channel'))
rr = xr.DataArray(np.random.randint(100, 500, size=(N_SCANLINE, N_FOV)),
attrs={'long_name': "Rain Rate (mm/hr)",
'scale_factor': 0.1},
dims=('Scanline', 'Field_of_view'))
sfc_type = xr.DataArray(np.random.randint(0, 4, size=(N_SCANLINE, N_FOV)),
attrs={'description': "type of surface:0-ocean," +
"1-sea ice,2-land,3-snow"},
dims=('Scanline', 'Field_of_view'))
latitude = xr.DataArray(DEFAULT_LAT.reshape(DEFAULT_2D_SHAPE),
attrs={'long_name':
"Latitude of the view (-90,90)"},
dims=('Scanline', 'Field_of_view'))
longitude = xr.DataArray(DEFAULT_LON.reshape(DEFAULT_2D_SHAPE),
attrs={"long_name":
"Longitude of the view (-180,180)"},
dims=('Scanline', 'Field_of_view'))
ds_vars = {
'Freq': FREQ,
'Polo': POLO,
'BT': bt,
'RR': rr,
'Sfc_type': sfc_type,
'Longitude': longitude,
'Latitude': latitude
}
attrs = {'missing_value': -999.}
ds = xr.Dataset(ds_vars, attrs=attrs)
ds = ds.assign_coords({"Freq": FREQ, "Latitude": latitude, "Longitude": longitude})
return ds
def fake_open_dataset(filename, **kwargs):
"""Create a Dataset similar to reading an actual file with xarray.open_dataset."""
if filename == METOP_FILE:
return _get_datasets_with_less_attributes()
return _get_datasets_with_attributes()
class TestMirsL2_NcReader:
"""Test mirs Reader."""
yaml_file = "mirs.yaml"
def setup_method(self):
"""Read fake data."""
from satpy._config import config_search_paths
self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
@pytest.mark.parametrize(
("filenames", "expected_loadables"),
[
([METOP_FILE], 1),
([NPP_MIRS_L2_SWATH], 1),
([OTHER_MIRS_L2_SWATH], 1),
]
)
def test_reader_creation(self, filenames, expected_loadables):
"""Test basic initialization."""
from satpy.readers import load_reader
with mock.patch('satpy.readers.mirs.xr.open_dataset') as od:
od.side_effect = fake_open_dataset
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames(filenames)
assert len(loadables) == expected_loadables
r.create_filehandlers(loadables)
# make sure we have some files
assert r.file_handlers
@pytest.mark.parametrize(
("filenames", "expected_datasets"),
[
([METOP_FILE], DS_IDS),
([NPP_MIRS_L2_SWATH], DS_IDS),
([OTHER_MIRS_L2_SWATH], DS_IDS),
]
)
def test_available_datasets(self, filenames, expected_datasets):
"""Test that variables are dynamically discovered."""
from satpy.readers import load_reader
with mock.patch('satpy.readers.mirs.xr.open_dataset') as od:
od.side_effect = fake_open_dataset
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames(filenames)
r.create_filehandlers(loadables)
avails = list(r.available_dataset_names)
for var_name in expected_datasets:
assert var_name in avails
@staticmethod
def _check_area(data_arr):
from pyresample.geometry import SwathDefinition
area = data_arr.attrs['area']
assert isinstance(area, SwathDefinition)
@staticmethod
def _check_fill(data_arr):
assert '_FillValue' not in data_arr.attrs
if np.issubdtype(data_arr.dtype, np.floating):
            # the fake data is generated as float64 and should stay that way
assert data_arr.dtype.type == np.float64
@staticmethod
def _check_valid_range(data_arr, test_valid_range):
# valid_range is popped out of data_arr.attrs when it is applied
assert 'valid_range' not in data_arr.attrs
assert data_arr.data.min() >= test_valid_range[0]
assert data_arr.data.max() <= test_valid_range[1]
@staticmethod
def _check_fill_value(data_arr, test_fill_value):
assert '_FillValue' not in data_arr.attrs
assert not (data_arr.data == test_fill_value).any()
@staticmethod
def _check_attrs(data_arr, platform_name):
attrs = data_arr.attrs
assert 'scale_factor' not in attrs
assert 'platform_name' in attrs
assert attrs['platform_name'] == platform_name
assert attrs['start_time'] == START_TIME
assert attrs['end_time'] == END_TIME
@pytest.mark.parametrize(
("filenames", "loadable_ids", "platform_name"),
[
([METOP_FILE], TEST_VARS, "metop-a"),
([NPP_MIRS_L2_SWATH], TEST_VARS, "npp"),
([N20_MIRS_L2_SWATH], TEST_VARS, "noaa-20"),
([OTHER_MIRS_L2_SWATH], TEST_VARS, "gpm"),
]
)
@pytest.mark.parametrize('reader_kw', [{}, {'limb_correction': False}])
def test_basic_load(self, filenames, loadable_ids,
platform_name, reader_kw):
"""Test that variables are loaded properly."""
from satpy.readers import load_reader
with mock.patch('satpy.readers.mirs.xr.open_dataset') as od:
od.side_effect = fake_open_dataset
r = load_reader(self.reader_configs)
loadables = r.select_files_from_pathnames(filenames)
r.create_filehandlers(loadables, fh_kwargs=reader_kw)
with mock.patch('satpy.readers.mirs.read_atms_coeff_to_string') as \
fd, mock.patch('satpy.readers.mirs.retrieve'):
fd.side_effect = fake_coeff_from_fn
loaded_data_arrs = r.load(loadable_ids)
assert len(loaded_data_arrs) == len(loadable_ids)
test_data = fake_open_dataset(filenames[0])
for _data_id, data_arr in loaded_data_arrs.items():
data_arr = data_arr.compute()
var_name = data_arr.attrs["name"]
if var_name not in ['latitude', 'longitude']:
self._check_area(data_arr)
self._check_fill(data_arr)
self._check_attrs(data_arr, platform_name)
input_fake_data = test_data['BT'] if "btemp" in var_name \
else test_data[var_name]
if "valid_range" in input_fake_data.attrs:
valid_range = input_fake_data.attrs["valid_range"]
self._check_valid_range(data_arr, valid_range)
if "_FillValue" in input_fake_data.attrs:
fill_value = input_fake_data.attrs["_FillValue"]
self._check_fill_value(data_arr, fill_value)
sensor = data_arr.attrs['sensor']
if reader_kw.get('limb_correction', True) and sensor == 'atms':
fd.assert_called()
else:
fd.assert_not_called()
assert data_arr.attrs['units'] == DEFAULT_UNITS[var_name]
| pytroll/satpy | satpy/tests/reader_tests/test_mirs.py | Python | gpl-3.0 | 14,863 |
#!/usr/bin/env python
import numpy
import os
import random
import cPickle as pickle
import argparse
class Namegen(object):
PROB_PATH = 'prob.pickle'
def __init__(self, corpus='male.txt'):
if not os.path.exists(Namegen.PROB_PATH):
self.prob, self.sums = self.read_corpus(corpus)
self.save_arrays()
else:
self.prob, self.sums = self.load_arrays()
def load_arrays(self):
"""
Loads the numpy array from the pickled file on disk
"""
with open(Namegen.PROB_PATH, 'rb') as prob_file:
return pickle.load(prob_file)
def save_arrays(self):
"""
Pickles the numpy array to disk
"""
with open(Namegen.PROB_PATH, 'wb') as prob_file:
pickle.dump((self.prob, self.sums), prob_file, pickle.HIGHEST_PROTOCOL)
def to_ordinal(self, c):
"""
Converts the char c to its appropriate index in numpy array.
"""
return 0 if c == ' ' else ord(c.lower()) - 96
def bi_to_ordinal(self, bi):
"""
Converts the string bi to the proper row index in the numpy array.
"""
return 27 * self.to_ordinal(bi[0]) + self.to_ordinal(bi[1])
    def from_ordinal(self, i):
        """
        Converts an array index back to its character (0 -> space).
        """
        return ' ' if i == 0 else chr(i + 96)
def read_corpus(self, path):
with open(path, 'r') as file:
return self.create_prob(file)
def create_prob(self, file):
"""
Creates the numpy array that holds the number of occurrences of the
bigrams.
"""
prob = numpy.zeros((729, 27), dtype=numpy.int16)
for line in file:
line = line.rstrip()
if not line.isalpha():
continue
#two in the front one in the back
line = ' ' + line + ' '
for i in xrange(2, len(line)):
prev =self.bi_to_ordinal(line[i - 2:i])
cur = self.to_ordinal(line[i])
prob[prev, cur] += 1
return prob, numpy.sum(prob, axis=1)
def pick_char(self, previous):
"""
Picks the next character given the previous bigram.
"""
ordinal = self.bi_to_ordinal(previous)
total = self.sums[ordinal]
if not total:
return ' '
val = random.randint(0, total - 1)
i = 0
while val >= self.prob[ordinal, i]:
val -= self.prob[ordinal, i]
i += 1
return self.from_ordinal(i)
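    # pick_char is an inverse-CDF (roulette-wheel) draw over the bigram
    # counts: walk the row, subtracting counts from the random value until it
    # is exhausted. With normalized counts this is equivalent to, e.g.,
    # numpy.random.choice(27, p=self.prob[ordinal] / float(total)).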
def generate(self):
"""
Generates a random name.
"""
name = ' '
while True:
name += self.pick_char(name[-2:])
if name[-1] == ' ':
return name.strip().capitalize()
def generate_clean(self):
"""
Generates a random name with length between 4 and 8.
"""
while True:
name = self.generate()
if 4 <= len(name) <= 8:
return name
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Namegen: Random name generator.')
parser.add_argument('-n', dest='count', type=int, default=1, help='The number of names to generate')
args = parser.parse_args()
generator = Namegen()
for i in xrange(args.count):
print(generator.generate_clean())
| volker48/namegen | namegen.py | Python | mit | 3,338 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Internal PopenWorker for PopenPool."""
import sys
import os
import struct
import threading
import traceback
import pickle
import logging
import cloudpickle
from tvm.contrib.popen_pool import StatusKind
class TimeoutStatus:
__slot__ = ["status"]
def __init__(self):
self.status = StatusKind.RUNNING
def main():
"""Main worker function"""
if len(sys.argv) != 3:
print("Usage: <read_fd> <write_fd>")
return
if sys.platform == "win32":
# pylint: disable=import-outside-toplevel
import msvcrt
reader = os.fdopen(msvcrt.open_osfhandle(int(sys.argv[1]), os.O_BINARY), "rb")
writer = os.fdopen(msvcrt.open_osfhandle(int(sys.argv[2]), os.O_BINARY), "wb")
else:
reader = os.fdopen(int(sys.argv[1]), "rb")
writer = os.fdopen(int(sys.argv[2]), "wb")
logging.basicConfig(level=logging.INFO)
lock = threading.Lock()
def _respond(ret_value):
"""Send data back to the client."""
data = cloudpickle.dumps(ret_value, protocol=pickle.HIGHEST_PROTOCOL)
writer.write(struct.pack("<i", len(data)))
writer.write(data)
writer.flush()
def _cancel_run(status):
lock.acquire()
if status.status == StatusKind.RUNNING:
_respond((StatusKind.TIMEOUT, TimeoutError()))
status.status = StatusKind.TIMEOUT
lock.release()
while True:
raw_bytes_size = reader.read(4)
if len(raw_bytes_size) != 4:
# the parent exited
return
bytes_size = struct.unpack("<i", raw_bytes_size)[0]
fn, args, kwargs, timeout = cloudpickle.loads(reader.read(bytes_size))
status = TimeoutStatus()
if timeout is not None:
watcher = threading.Timer(timeout, _cancel_run, [status])
watcher.daemon = True
watcher.start()
# pylint: disable=broad-except
try:
result = fn(*args, **kwargs)
ret_value = (StatusKind.COMPLETE, result)
except Exception as exception:
msg = traceback.format_exc()
ret_value = (StatusKind.EXCEPTION, type(exception)(msg))
if timeout is not None:
watcher.cancel()
lock.acquire()
if status.status == StatusKind.RUNNING:
_respond(ret_value)
status.status = StatusKind.COMPLETE
lock.release()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, IOError):
pass
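# Wire format used above: every message is a 4-byte little-endian length
# prefix followed by a cloudpickle payload. A parent-side read therefore
# looks roughly like (illustrative sketch; the real client presumably lives
# in tvm.contrib.popen_pool):
#
#     (size,) = struct.unpack("<i", reader.read(4))
#     status, value = cloudpickle.loads(reader.read(size))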
| Laurawly/tvm-1 | python/tvm/exec/popen_worker.py | Python | apache-2.0 | 3,340 |
from django.views.generic.base import TemplateView, View
from django.http import (
HttpResponse,
HttpResponseNotAllowed,
HttpResponseRedirect)
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.models import User
from django.contrib.auth import login
class SPAView(TemplateView):
template_name = 'libraryapp/main.html' | andela-sjames/Django-ReactJS-Library-App | reactlibapp/libraryapp/views.py | Python | mit | 362 |
#!/usr/bin/env python
# -*- coding: ascii -*-
# flake8: noqa
#
# Copyright 2011, 2012
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
=====================
Javascript Minifier
=====================
rJSmin is a javascript minifier written in python.
The minifier is based on the semantics of `jsmin.c by Douglas Crockford`_\.
The module is a re-implementation aiming for speed, so it can be used at
runtime (rather than during a preprocessing step). Usually it produces the
same results as the original ``jsmin.c``. It differs in the following ways:
- there is no error detection: unterminated string, regex and comment
literals are treated as regular javascript code and minified as such.
- Control characters inside string and regex literals are left untouched; they
are not converted to spaces (nor to \n)
- Newline characters are not allowed inside string and regex literals, except
for line continuations in string literals (ECMA-5).
- "return /regex/" is recognized correctly.
- "+ +" and "- -" sequences are not collapsed to '++' or '--'
- Newlines before ! operators are removed more sensibly
- rJSmin does not handle streams, but only complete strings. (However, the
module provides a "streamy" interface).
Since most parts of the logic are handled by the regex engine it's way
faster than the original python port of ``jsmin.c`` by Baruch Even. The speed
factor varies between about 6 and 55 depending on input and python version
(it gets faster the more compressed the input already is). Compared to the
speed-refactored python port by Dave St.Germain the performance gain is less
dramatic but still between 1.2 and 7. See the docs/BENCHMARKS file for
details.
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
Both python 2 and python 3 are supported.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
"""
__author__ = u'André Malo'
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.5'
__all__ = ['jsmin']
import re as _re
def _make_jsmin(python_only=False):
"""
Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`python_only` : ``bool``
Use only the python variant. If true, the c extension is not even
tried to be loaded.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = R0912, R0914, W0612
if not python_only:
try:
import _rjsmin
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range # pylint: disable = W0622
space_chars = r'[\000-\011\013\014\016-\040]'
line_comment = r'(?://[^\r\n]*)'
space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
nospecial = r'[^/\\\[\r\n]'
regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
nospecial, charclass, nospecial
)
space = r'(?:%s|%s)' % (space_chars, space_comment)
newline = r'(?:%s?[\r\n])' % line_comment
def fix_charclass(result):
""" Fixup string of chars to fit into a regex char class """
pos = result.find('-')
if pos >= 0:
result = r'%s%s-' % (result[:pos], result[pos + 1:])
def sequentize(string):
"""
Notate consecutive characters as sequence
(1-4 instead of 1234)
"""
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return ''.join(['%s%s%s' % (
chr(first),
last > first + 1 and '-' or '',
last != first and chr(last) or ''
) for first, last in result])
return _re.sub(r'([\000-\040\047])', # for better portability
lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
.replace('\\', '\\\\')
.replace('[', '\\[')
.replace(']', '\\]')
)
)
def id_literal_(what):
""" Make id_literal like char class """
match = _re.compile(what).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return '[^%s]' % fix_charclass(result)
def not_id_literal_(keep):
""" Make negated id_literal like char class """
match = _re.compile(id_literal_(keep)).match
result = ''.join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return r'[%s]' % fix_charclass(result)
not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
preregex1 = r'[(,=:\[!&|?{};\r\n]'
preregex2 = r'%(not_id_literal)sreturn' % locals()
id_literal = id_literal_(r'[a-zA-Z0-9_$]')
id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(!+-]')
id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_sub = _re.compile((
r'([^\047"/\000-\040]+)'
r'|(%(strings)s[^\047"/\000-\040]*)'
r'|(?:(?<=%(preregex1)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?:(?<=%(preregex2)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|(?<=\+)(%(space)s)+(?=\+)'
r'|(?<=-)(%(space)s)+(?=-)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % locals()).sub
#print space_sub.__self__.pattern
def space_subber(match):
""" Substitution callback """
# pylint: disable = C0321, R0911
groups = match.groups()
if groups[0]: return groups[0]
elif groups[1]: return groups[1]
elif groups[2]: return groups[2]
elif groups[3]: return groups[3]
elif groups[4]: return '\n'
elif groups[5] or groups[6] or groups[7]: return ' '
else: return ''
def jsmin(script): # pylint: disable = W0621
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub(space_subber, '\n%s\n' % script).strip()
return jsmin
jsmin = _make_jsmin()
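# Illustrative sketch (expected behavior per the rules in the module
# docstring; the exact output shown is not taken from an authoritative test):
#
#     >>> jsmin('/* comment */ var a = 1;\n var b = 2;')
#     'var a=1;var b=2;'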
def jsmin_for_posers(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Warning: This function is the digest of a _make_jsmin() call. It just
utilizes the resulting regex. It's just for fun here and may
vanish any time. Use the `jsmin` function instead.
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(groups[7] and ' ') or
''
)
return _re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
if __name__ == '__main__':
import sys as _sys
_sys.stdout.write(jsmin(_sys.stdin.read()))
| ofer43211/unisubs | apps/unisubs_compressor/contrib/rjsmin.py | Python | agpl-3.0 | 10,789 |
# find the L.C.M. of two numbers using a while loop (no recursion is involved)
print('hi Krishnakant, welcome to Python')
def lcm(x, y):
if x > y:
grt = x
else:
grt = y
while(True):
if((grt % x == 0) and (grt % y == 0)):
lcm = grt
break
grt += 1
return lcm
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
print('LCM of', num1,"and", num2,"is", lcm(num1, num2))
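# A non-looping alternative (a sketch, assuming Python 3.5+ where
# math.gcd is available): lcm(x, y) equals x * y // gcd(x, y).
def lcm_via_gcd(x, y):
    import math
    # Dividing the product by the GCD yields the least common multiple.
    return x * y // math.gcd(x, y)
# e.g. lcm_via_gcd(4, 6) returns 12.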
| krishnakantkumar0/Simple-Python | 38.py | Python | gpl-3.0 | 410 |
#!/usr/bin/env python3
import os
import launch_jobs
import path_utils
class CustomTask(launch_jobs.BaseTask):
def get_desc(self):
return "rm"
def _read_params(self):
target_path = None
ignore_non_pre_existence = "ignore_non_pre_existence" in self.params
# target_path
try:
target_path = self.params["target_path"]
except KeyError:
return False, "target_path is a required parameter"
return True, (target_path, ignore_non_pre_existence)
def run_task(self, feedback_object, execution_name=None):
v, r = self._read_params()
if not v:
return False, r
target_path, ignore_non_pre_existence = r
if not os.path.exists(target_path):
if ignore_non_pre_existence:
return True, "target_path [%s] does not exist (ignored)" % target_path
return False, "target_path [%s] does not exist" % target_path
if os.path.isdir(target_path):
return False, "target_path [%s] is a directory" % target_path
os.unlink(target_path)
return True, None
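# Example params dict this task reads (illustrative values; the mere
# presence of the "ignore_non_pre_existence" key enables that flag):
#     {"target_path": "/tmp/stale.lock", "ignore_non_pre_existence": ""}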
| mvendra/mvtools | launchers/launch_jobs_plugins/tasks/rm_plugin.py | Python | mit | 1,147 |
import asyncio
import time
from generic.game import REGISTERED_GAMES
from generic.game.exeptions import GameAlreadyExists, GameNotExists
from generic.game.signals import Signal
class game(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if attrs['slug']:
if attrs['slug'] in REGISTERED_GAMES:
raise GameAlreadyExists
REGISTERED_GAMES[attrs['slug']] = new_cls
return new_cls
class BaseGame(metaclass=game):
name = 'Game'
version = '0.1.0'
slug = None
keep_alive_period = 5
scenarios = ()
@classmethod
def info(cls, extended=False):
info = {
'name': cls.name,
'version': cls.version,
'slug': cls.slug,
}
if extended:
extended_info = {
'scenarios': cls.get_scenarios_info()
}
info.update(extended_info)
return info
@classmethod
def get_scenarios_info(cls):
return {s.name: s.description for s in cls.scenarios}
def __init__(self, loop=None):
self.loop = loop or asyncio.get_event_loop()
self._transport = None
self._delayed_start = False
self.units = {}
        self.start_time = 0  # game start time, in milliseconds since the epoch
self.init()
def init(self):
pass
def __repr__(self):
return '{} <{}>'.format(self.name, self.version)
def add_unit(self, unit):
self.units[unit] = unit
def destroy_unit(self, unit):
unit.destroy()
del self.units[unit]
def broadcast(self, signal):
self.on(signal)
def on(self, signal):
for unit in self.units.values():
unit.on(signal)
@property
def transport(self):
return self._transport
@transport.setter
def transport(self, value):
self._transport = value
self._keep_alive_timer = self.loop.call_later(self.keep_alive_period, self.keep_alive)
if self._delayed_start:
self.start(delayed=False)
def push(self, state):
self.transport.send(state)
def start(self, delayed=True):
if self.transport:
self.start_time = time.time() * 1000
self.on(Signal('start'))
elif delayed:
self._delayed_start = True
@property
def time(self):
return (time.time() * 1000) - self.start_time
def keep_alive(self):
self.transport.send(self.info())
self._keep_alive_timer = self.loop.call_later(self.keep_alive_period, self.keep_alive)
class GameFactory(object):
def __init__(self, game_slug):
self.game_slug = game_slug
def __call__(self, *args, **kwargs):
return self.create(*args, **kwargs)
def create(self, *args, **kwargs):
if self.game_slug not in REGISTERED_GAMES:
raise GameNotExists
return REGISTERED_GAMES[self.game_slug](*args, **kwargs)
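# A minimal registration sketch (hypothetical game; defining the subclass
# is enough, because the `game` metaclass registers it by slug):
#     class PongGame(BaseGame):
#         name = 'Pong'
#         slug = 'pong'
#     GameFactory('pong')()  # returns a PongGame instance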
| devova/SimpleCommander | command_server/generic/game/base.py | Python | mit | 2,994 |
from exp.views.analytics import * # noqa
from exp.views.contact import * # noqa
from exp.views.dashboard import * # noqa
from exp.views.lab import * # noqa
from exp.views.responses import * # noqa
from exp.views.study import * # noqa
from exp.views.user import * # noqa
from exp.views.video import * # noqa
| CenterForOpenScience/lookit-api | exp/views/__init__.py | Python | apache-2.0 | 315 |
#!/usr/bin/env python
"""Installer for radeontray program
Copyright 2012-2013 Francisco Pina Martins <f.pinamartins@gmail.com>
and Mirco Tracolli.
This file is part of Radeon-tray.
Radeon-tray is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radeon-tray is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Radeon-tray. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup
import radeontray
import os, sys
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def main():
r_pyqt = False
r_zmq = False
try:
import PyQt4
r_pyqt = True
except ImportError:
print("This program requires PyQt4")
try:
import zmq
r_zmq = True
except ImportError:
print("This program requires pyzmq version >= 13.0.0")
if not r_pyqt or not r_zmq:
sys.exit(1)
setup(
name='Radeon-tray',
zip_safe=False,
#Include data from MANIFEST.in
#include_package_data = True,
version=radeontray.__version__,
author="Pina Martins",
author_email='f.pinamartins@gmail.com',
# To add
#long_description=read('README'),
description='A small program to control the power profiles of your Radeon card via systray icon.',
url='https://github.com/StuntsPT/Radeon-tray',
license="GPLv3",
keywords = "radeon tray icon",
#setup_requires=['pyzmq>=13.0.0', 'PyQt4'],
dependency_links = ['http://sourceforge.net/projects/pyqt/files/latest/download?source=files'],
packages=['radeontray'],
package_data={'radeontray':
['assets/*.svg', 'devel/*.py', 'systemd/*.service', 'conf/*.desktop']
},
#scripts=SCRIPTS,
#data_files=DATA_FILES,
entry_points={
'console_scripts': [
'radeontray = radeontray.mainfunctions:client',
'radeontrayserver = radeontray.mainfunctions:server'
]
},
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"Topic :: System :: Monitoring",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)"
]
)
if __name__ == '__main__':
sys.exit(main())
| StuntsPT/Radeon-tray | setup.py | Python | gpl-3.0 | 2,772 |
from django.conf import settings
from django.shortcuts import render
from django.db.models import F
import django_tables2 as tables
import django_filters as filters
from django_filters.views import FilterView
from django_peeringdb.models import Network, IXLan, InternetExchange
from peercollect.models import Peering
from itertools import groupby
class NetworkSummaryTable(tables.Table):
class Meta:
model = Network
fields = (
"asn",
"name",
"info_traffic",
"info_scope",
"policy_general",
"asrank__rank",
)
class NetworkFilter(filters.FilterSet):
name = filters.CharFilter(lookup_expr="contains")
present_in = filters.ModelMultipleChoiceFilter(
label="Present in",
field_name="netixlan_set__ixlan__ix",
queryset=InternetExchange.objects.filter(
ixlan_set__netixlan_set__net__asn=settings.OUR_ASN
).order_by("name"),
)
class Meta:
model = Network
fields = (
"asn",
"name",
"info_traffic",
"info_scope",
"policy_general",
)
class FilteredNetworkList(tables.SingleTableMixin, FilterView):
model = Network
ordering = ["asn"]
table_class = NetworkSummaryTable
filterset_class = NetworkFilter
template_name = "prospects.html"
class PeeringTable(tables.Table):
class Meta:
model = Peering
fields = (
"netixlan__net__asn",
"netixlan__net__name",
"netixlan__ixlan__ix",
"router",
)
class PeeringFilter(filters.FilterSet):
asn = filters.NumberFilter(label="ASN", field_name="netixlan__net__asn")
ixp = filters.ModelMultipleChoiceFilter(
label="Internet Exchange",
field_name="netixlan__ixlan__ix",
queryset=InternetExchange.objects.filter(
ixlan_set__netixlan_set__peering__isnull=False
)
.distinct()
.order_by("name"),
)
class PeeringList(tables.SingleTableMixin, FilterView):
model = Peering
ordering = ["netixlan__ixlan__ix", "netixlan__net__asn"]
table_class = PeeringTable
filterset_class = PeeringFilter
template_name = "peerings.html"
def home(request):
us = Network.objects.get(asn=settings.OUR_ASN)
our_lans = IXLan.objects.filter(netixlan_set__net=us)
our_ix = InternetExchange.objects.filter(ixlan_set__in=our_lans)
our_ix = our_ix.order_by("name")
common_nets = Network.objects.all()
common_nets = common_nets.filter(netixlan_set__ixlan__in=our_lans)
common_nets = common_nets.annotate(ix=F("netixlan_set__ixlan__ix"))
common_nets = common_nets.annotate(
peering=F("netixlan_set__peering__netixlan__ixlan__ix")
)
common_nets = common_nets.distinct()
common_nets = common_nets.order_by("policy_general", "asn")
values = common_nets.values(
"name", "asn", "policy_general", "info_traffic", "ix", "peering",
)
nets = []
for k, g in groupby(values, key=lambda n: n["asn"]):
groups = list(g)
# be DRY and copy all the keys from values
net = groups[0]
# override ix/peering with a list of all of their values
for combined in ("ix", "peering"):
net[combined] = [i[combined] for i in groups if i[combined] is not None]
# if we already have peerings established on all the potential IXPs
        # with this peer, skip it; should this be made configurable?
if set(net["ix"]) == set(net["peering"]):
continue
# if we have no peerings at all with this peer, skip it
# (this belongs in a separate view, most probably)
# FIXME
# if len(net['peering']) == 0:
# continue
nets.append(net)
context = {
"us": us,
"ixps": our_ix,
"nets": nets,
}
return render(request, "home.html", context)
| paravoid/peerassist | peermatch/views.py | Python | apache-2.0 | 3,980 |
"""
Views which allow users to create and activate accounts.
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from registration.forms import RegistrationFormUniqueEmail
from registration.models import RegistrationProfile
def activate(request, activation_key,
template_name='registration/activate.html',
extra_context=None):
"""
Activate a ``User``'s account from an activation key, if their key
is valid and hasn't expired.
By default, use the template ``registration/activate.html``; to
change this, pass the name of a template as the keyword argument
``template_name``.
**Required arguments**
``activation_key``
The activation key to validate and use for activating the
``User``.
**Optional arguments**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``template_name``
A custom template to use.
**Context:**
``account``
The ``User`` object corresponding to the account, if the
activation was successful. ``False`` if the activation was not
successful.
``expiration_days``
The number of days for which activation keys stay valid after
registration.
Any extra variables supplied in the ``extra_context`` argument
(see above).
**Template:**
registration/activate.html or ``template_name`` keyword argument.
"""
activation_key = activation_key.lower() # Normalize before trying anything with it.
account = RegistrationProfile.objects.activate_user(activation_key)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{ 'account': account,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS },
context_instance=context)
def register(request, success_url=None,
form_class=RegistrationFormUniqueEmail, profile_callback=None,
template_name='registration/registration_form.html',
extra_context=None):
"""
Allow a new user to register an account.
Following successful registration, issue a redirect; by default,
this will be whatever URL corresponds to the named URL pattern
``registration_complete``, which will be
``/accounts/register/complete/`` if using the included URLConf. To
change this, point that named pattern at another URL, or pass your
preferred URL as the keyword argument ``success_url``.
By default, ``registration.forms.RegistrationForm`` will be used
as the registration form; to change this, pass a different form
class as the ``form_class`` keyword argument. The form class you
specify must have a method ``save`` which will create and return
the new ``User``, and that method must accept the keyword argument
``profile_callback`` (see below).
To enable creation of a site-specific user profile object for the
new user, pass a function which will create the profile object as
the keyword argument ``profile_callback``. See
``RegistrationManager.create_inactive_user`` in the file
``models.py`` for details on how to write this function.
By default, use the template
``registration/registration_form.html``; to change this, pass the
name of a template as the keyword argument ``template_name``.
**Required arguments**
None.
**Optional arguments**
``form_class``
The form class to use for registration.
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``profile_callback``
A function which will be used to create a site-specific
profile instance for the new ``User``.
``success_url``
The URL to redirect to on successful registration.
``template_name``
A custom template to use.
**Context:**
``form``
The registration form.
Any extra variables supplied in the ``extra_context`` argument
(see above).
**Template:**
registration/registration_form.html or ``template_name`` keyword
argument.
"""
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
new_user = form.save(request, profile_callback=profile_callback)
# success_url needs to be dynamically generated here; setting a
# a default value using reverse() will cause circular-import
# problems with the default URLConf for this application, which
# imports this file.
return HttpResponseRedirect(success_url or reverse('registration_complete'))
else:
form = form_class()
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{ 'form': form },
context_instance=context)
| umitproject/openmonitor-aggregator | registration/views.py | Python | agpl-3.0 | 5,801 |
# minent.settings
# Settings module for the Minimum Entropy application
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Tue Jul 05 14:10:19 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: __init__.py [737e142] benjamin@bengfort.com $
"""
Settings module for the Minimum Entropy application
"""
##########################################################################
## Imports
##########################################################################
| DistrictDataLabs/minimum-entropy | minent/settings/__init__.py | Python | apache-2.0 | 542 |
#! /usr/bin/env python
import sys
import random
import math
from sets import Set
# assigns each node an attribute saying which shard it should be placed on
num_shards = 8
num_runs = 1
capacity = 84000/num_shards
assignments = dict()
shard_sizes = [0] * num_shards
LDG = True
G = {}
def load(argv):
assert(len(argv) == 2)
print 'loading graph from file'
inputfile = open(argv[1], 'r')
for line in inputfile:
if line[0] == '#': # ignore comments
continue
edge = line.split()
assert(len(edge) == 2)
n0 = int(edge[0])
n1 = int(edge[1])
if n0 not in G:
G[n0] = Set([])
if n1 not in G:
G[n1] = Set([])
G[n0].add(n1)
inputfile.close()
def get_balanced_assignment(tied_shards):
min_size = shard_sizes[tied_shards[0]] #pick one as min
min_indices = []
for s in tied_shards:
if shard_sizes[s] < min_size:
min_size = shard_sizes[s]
min_indices = [s]
elif shard_sizes[s] == min_size:
min_indices.append(s)
assert(len(min_indices) > 0)
return random.choice(min_indices)
def penalty(shard):
return 1.0 - (float(shard_sizes[shard])/float(capacity))
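# e.g. with num_shards = 8 the capacity is 10500, so a shard already
# holding 2100 nodes gets penalty 1.0 - 2100/10500 = 0.8.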
def get_intersection_scores(node):
shard_scores = [0] * num_shards
for nbr in G[node]:
if nbr in assignments:
shard_scores[assignments[nbr]] += 1
return shard_scores
def clustering_multiplier(num_mutual_friends):
return math.log(2 + num_mutual_friends)
def calc_mutual_friends(n1, n2):
return len(G[n1] & G[n2])
def get_clustering_scores(node):
shard_scores = [0] * num_shards
for nbr in G[node]:
if nbr in assignments:
mutual_friends = calc_mutual_friends(node, nbr)
shard_scores[assignments[nbr]] += clustering_multiplier(mutual_friends)
return shard_scores
def get_ldg_assignment(node):
if LDG:
shard_scores = get_intersection_scores(node)
else:
shard_scores = get_clustering_scores(node)
arg_max = 0.0
max_indices = []
for i in range(num_shards):
val = (float(shard_scores[i])*penalty(i))
if arg_max < val:
arg_max = val
max_indices = [i]
elif arg_max == val:
max_indices.append(i)
assert(len(max_indices) > 0)
    if len(max_indices) == 1:
return max_indices[0]
else:
return get_balanced_assignment(max_indices)
def get_hash_assignment(node):
return node % num_shards
print 'partitioning graph onto ' + str(num_shards) + ' shards using LDG with a capacity constant of ' + str(capacity)
load(sys.argv)
for run in range(num_runs):
moved = 0
for n in G:
orig_loc = -1
if n in assignments:
shard_sizes[assignments[n]] -= 1
orig_loc = assignments[n]
put_on_shard = get_ldg_assignment(n)
#put_on_shard = get_hash_assignment(n)
assignments[n] = put_on_shard
shard_sizes[put_on_shard] += 1
if orig_loc != -1 and orig_loc != put_on_shard:
moved += 1
print 'Completed run ' + str(run) + ', moved node count = ' + str(moved)
print shard_sizes
'''
colors = [float(assignments[n])/float(num_shards) for n in G.nodes()]
print 'trying to draw graph...'
nx.draw_circular(G, node_color=colors)
plt.show()
'''
fname = sys.argv[1].rsplit('.',1)
if len(fname) == 1:
    fileout = open(fname[0] + '-partitioned', 'w')
else:
fileout = open(fname[0] + '-partitioned.' + fname[1], 'w')
fileout.write('#' + str(len(assignments)) + '\n')
for (k,v) in assignments.iteritems():
fileout.write(str(k) + ' ' + str(v) + '\n')
for n in G:
for nbr in G[n]:
line = str(n) + ' ' + str(nbr)
if random.random() > 0.9:
line += ' color blue\n'
else:
line += '\n'
fileout.write(line)
fileout.close()
print 'finished writing assignments'
| Determinant/weaver | tests/static_partitioning/stream_partition.py | Python | bsd-3-clause | 3,924 |
#!/usr/bin/env python
import sys
import oauth2 as oauth
import time
import json
import urllib
TWEET_SIZE = 140
INITIAL_MSG = ".@NSAGov @stupidhackathon I just want a safer America"
TWEET_PREFIX = ".@NSAGov @stupidhackathon "
RATE_BUFFER_SECS = 7.0
# Set up instances of our Token and Consumer. The Consumer.key and
# Consumer.secret are given to you by the API provider. The Token.key and
# Token.secret is given to you after a three-legged authentication.
token = oauth.Token(key="4882253895-arMyoLh08m1gsYlDZP4mrHAFilc5IuS4grHvsF3", secret="BcbeqAu5cgdrfU5i48nQblkASUNKfiy1SlEhVDNZRDcGq")
consumer = oauth.Consumer(key="XgOrRE1xFOtw18Tixd7j6sl2G", secret="TiDS6b0fkmw6SwcnyDTtTRIkIjjUT7RPaI9C97aDRJnb4Tb3mX")
client = oauth.Client(consumer, token)
buffer = ''
def got_key(c):
global buffer
buffer += c
if len(buffer) == TWEET_SIZE - len(TWEET_PREFIX):
tweet(TWEET_PREFIX + buffer)
buffer = ''
go_time, last_text = time.time(), None
def tweet(text):
global go_time
if time.time() < go_time:
return
# Twitter ignores duplicate tweets, so don't bother sending
global last_text
if text == last_text:
print >>sys.stderr, "Dropping duplicate tweet"
return
last_text = text
# Make request!
url = "https://api.twitter.com/1.1/statuses/update.json"
body = {'status': text}
headers = {'Content-type': 'application/x-www-form-urlencoded'}
resp_headers, content = client.request(url, method='POST', headers=headers, body=urllib.urlencode(body))
if int(resp_headers['status']) != 200:
print >>sys.stderr, "Got", str(resp_headers['status']+':'), content
        if int(resp_headers['status']) == 429 or int(resp_headers.get('x-rate-limit-remaining', 1)) == 0:
            # x-rate-limit-reset is the epoch time when the rate window reopens
            go_time = float(resp_headers['x-rate-limit-reset']) + RATE_BUFFER_SECS
            print >>sys.stderr, 'Dropping until', go_time
def run():
if len(INITIAL_MSG):
tweet(INITIAL_MSG)
c = sys.stdin.read(1)
while len(c) > 0:
if len(c.strip()) > 0:
got_key(c.strip())
c = sys.stdin.read(1)
if __name__ == '__main__':
run()
| komizutama/OptIn | publisher.py | Python | artistic-2.0 | 2,143 |
import angr
import claripy
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_i386():
p = angr.Project(os.path.join(test_location, 'i386', 'test_strcasecmp'), auto_load_libs=False)
arg1 = claripy.BVS('arg1', 20*8)
s = p.factory.entry_state(args=("test_strcasecmp", arg1))
sm = p.factory.simulation_manager(s)
sm.explore()
sm.move('deadended', 'found', filter_func=lambda s: b"Welcome" in s.posix.dumps(1))
assert len(sm.found) == 1
f = sm.found[0]
sol = f.solver.eval(arg1, cast_to=bytes)
assert b'\x00' in sol
assert sol[:sol.index(b'\x00')].lower() == b'letmein'
assert b'wchar works' in f.posix.dumps(1)
if __name__ == "__main__":
test_i386()
| angr/angr | tests/test_strcasecmp.py | Python | bsd-2-clause | 780 |
# Copyright (C) 2009-2012 Lars Wirzenius
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''A generic plugin manager.
The plugin manager finds files with plugins and loads them. It looks
for plugins in a number of locations specified by the caller. To add
a plugin to be loaded, it is enough to put it in one of the locations,
and name it *_plugin.py. (The naming convention is to allow having
other modules as well, such as unit tests, in the same locations.)
'''
import imp
import inspect
import os
class Plugin(object):
'''Base class for plugins.
A plugin MUST NOT have any side effects when it is instantiated.
This is necessary so that it can be safely loaded by unit tests,
and so that a user interface can allow the user to disable it,
even if it is installed, with no ill effects. Any side effects
that would normally happen should occur in the enable() method,
and be undone by the disable() method. These methods must be
callable any number of times.
The subclass MAY define the following attributes:
* name
* description
* version
* required_application_version
name is the user-visible identifier for the plugin. It defaults
to the plugin's classname.
description is the user-visible description of the plugin. It may
be arbitrarily long, and can use pango markup language. Defaults
to the empty string.
version is the plugin version. Defaults to '0.0.0'. It MUST be a
sequence of integers separated by periods. If several plugins with
the same name are found, the newest version is used. Versions are
compared integer by integer, starting with the first one, and a
missing integer treated as a zero. If two plugins have the same
version, either might be used.
required_application_version gives the version of the minimal
application version the plugin is written for. The first integer
must match exactly: if the application is version 2.3.4, the
plugin's required_application_version must be at least 2 and
at most 2.3.4 to be loaded. Defaults to 0.
'''
@property
def name(self):
return self.__class__.__name__
@property
def description(self):
return ''
@property
def version(self):
return '0.0.0'
@property
def required_application_version(self):
return '0.0.0'
def setup(self):
'''Setup plugin.
This is called at plugin load time. It should not yet enable the
plugin (the ``enable`` method does that), but it might do things
like add itself into a hook that adds command line arguments
to the application.
'''
def enable_wrapper(self):
'''Enable plugin.
The plugin manager will call this method, which then calls the
enable method. Plugins should implement the enable method.
The wrapper method is there to allow an application to provide
an extended base class that does some application specific
magic when plugins are enabled or disabled.
'''
self.enable()
def disable_wrapper(self):
'''Corresponds to enable_wrapper, but for disabling a plugin.'''
self.disable()
def enable(self):
'''Enable the plugin.'''
        raise NotImplementedError()
def disable(self):
'''Disable the plugin.'''
        raise NotImplementedError()
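# A minimal plugin sketch (hypothetical; by the loading convention above
# it would live in a file named something like hello_plugin.py):
#     class HelloPlugin(Plugin):
#         def enable(self):
#             print('hello enabled')
#         def disable(self):
#             print('hello disabled')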
| perryl/morph | cliapp/plugin.py | Python | gpl-2.0 | 4,073 |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module_value1 = 5
additiv_global = '*' * 1000
def calledRepeatedly():
# Force frame and eliminate forward propagation (currently).
module_value1
# Make sure we have a local variable s anyway
s = '2'
# Add an unknown, making 's' an unknown.
additiv = additiv_global
s += additiv
# construct_begin
s += additiv
s += additiv
s += additiv
s += additiv
s += additiv
# construct_end
# Prevent optimization into direct return.
s += additiv
return s
import itertools
for x in itertools.repeat(None, 10000):
calledRepeatedly()
print("OK.")
| kayhayen/Nuitka | tests/benchmarks/constructs/InplaceOperationStringAdd.py | Python | apache-2.0 | 1,500 |
#! /usr/bin/env python2
from SimpleHTTPServer import SimpleHTTPRequestHandler
import BaseHTTPServer
class CORSRequestHandler (SimpleHTTPRequestHandler):
def end_headers (self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
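# Usage sketch: `python2 http_cors_server.py 8000` serves the current
# directory with CORS headers; BaseHTTPServer.test reads the port from
# argv and defaults to 8000 when it is omitted.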
if __name__ == '__main__':
BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)
| warehouseman/trello-swagger-generator | dev/http_cors_server.py | Python | mit | 394 |
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hovertextsrc", parent_name="contour", **kwargs):
super(HovertextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/contour/_hovertextsrc.py | Python | mit | 453 |
##############################################################################
#
# Copyright (c) 2008-2010 SIA "KN dati". (http://kndati.lv) All Rights Reserved.
# General contacts <info@kndati.lv>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# Modified template model from:
#
# Micronaet s.r.l. - Nicola Riolini
# Using the same term of use
##############################################################################
from osv import osv, fields
from datetime import datetime
import decimal_precision as dp
import tools, os
import base64, urllib
class inherit_product_product(osv.osv):
_name = 'product.product'
_inherit = 'product.product'
def get_quotation_image(self, cr, uid, item, context=None):
        ''' Get the single image for a product
            (default path is ~/photo/<db_name>/product/default)
'''
img = ''
extension = "jpg"
image_path = os.path.expanduser("~/photo/%s/product/default"%(cr.dbname))
empty_image= "%s/%s.%s"%(image_path, "empty", extension)
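        # e.g. for default_code "ABC 123" the lookup becomes
        # ~/photo/<db_name>/product/default/ABC_123.jpg (spaces map to "_").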
product_browse=self.browse(cr, uid, item, context=context)
# Image compoesed with code format (code.jpg)
if product_browse.default_code:
try:
(filename, header) = urllib.urlretrieve("%s/%s.%s"%(image_path, product_browse.default_code.replace(" ", "_"), extension)) # code image
f = open(filename , 'rb')
img = base64.encodestring(f.read())
f.close()
except:
img = ''
if not img: # empty image:
try:
(filename, header) = urllib.urlretrieve(empty_image) # empty setted up on folder
f = open(filename , 'rb')
img = base64.encodestring(f.read())
f.close()
except:
img = ''
return img
# Fields function:
def _get_quotation_image(self, cr, uid, ids, field_name, arg, context=None):
''' Field function, for every ids test if there's image and return
base64 format according to code value (images are jpg)
'''
res = {}
for item in ids:
res[item] = self.get_quotation_image(cr, uid, item, context=context)
return res
_columns = {
'quotation_photo':fields.function(_get_quotation_image, type="binary", method=True),
'quantity_x_pack': fields.integer('Q. per pack'),
}
_defaults = {
'quantity_x_pack': lambda *a: 1,
}
inherit_product_product()
class sale_order_extra(osv.osv):
"""
sale.order extra fields
"""
_inherit = 'sale.order'
_name = 'sale.order'
_columns = {
        'quotation_model':fields.selection([(1,'Detailed quotation (qty-discount-subtotals)'),
                                            (2,'Short quotation (qty only)'),],'Model', readonly=False, required=True),
}
_defaults = {
'quotation_model': lambda *x: 2, # short
}
sale_order_extra()
class sale_order_line_add_fields(osv.osv):
_name='sale.order.line'
_inherit='sale.order.line'
_columns={
        'repeat_header_line': fields.boolean('Header', required=False, help="Check when the header must be repeated, e.g. after a title row."),
        'insert_photo': fields.boolean('With photo', required=False, help="Check when the product photo must be included in the quotation."),
'use_amazon_description': fields.boolean('Amazon description', required=False, help="Take amazon description instead of product's one"),
'show_notes': fields.boolean('Show notes', required=False, help="Show notes after description"),
}
sale_order_line_add_fields()
| Micronaet/micronaet-migration | __UNPORTED__/report_aeroo_gpb/order_line.py | Python | agpl-3.0 | 5,034 |
# Copyright (c) 2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
import re
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer, network_port
def validate_node_group_id(node_group_id):
    # anchored so that ids longer than four digits are rejected
    if re.match(r'\d{1,4}$', node_group_id):
return node_group_id
raise ValueError("Invalid NodeGroupId: %s" % node_group_id)
class CacheCluster(AWSObject):
resource_type = "AWS::ElastiCache::CacheCluster"
props = {
'AutoMinorVersionUpgrade': (boolean, False),
'AZMode': (basestring, False),
'CacheNodeType': (basestring, True),
'CacheParameterGroupName': (basestring, False),
'CacheSecurityGroupNames': ([basestring], False),
'CacheSubnetGroupName': (basestring, False),
'ClusterName': (basestring, False),
'Engine': (basestring, True),
'EngineVersion': (basestring, False),
'NotificationTopicArn': (basestring, False),
'NumCacheNodes': (integer, True),
'Port': (integer, False),
'PreferredAvailabilityZone': (basestring, False),
'PreferredAvailabilityZones': ([basestring], False),
'PreferredMaintenanceWindow': (basestring, False),
'SnapshotArns': ([basestring], False),
'SnapshotName': (basestring, False),
'SnapshotRetentionLimit': (integer, False),
'SnapshotWindow': (basestring, False),
'Tags': (Tags, False),
'VpcSecurityGroupIds': ([basestring], False),
}
def validate(self):
# Check that AZMode is "cross-az" if more than one Availability zone
# is specified in PreferredAvailabilityZones
preferred_azs = self.properties.get('PreferredAvailabilityZones')
if preferred_azs is not None and \
isinstance(preferred_azs, list) and \
len(preferred_azs) > 1:
if self.properties.get('AZMode') != 'cross-az':
raise ValueError('AZMode must be "cross-az" if more than one a'
'vailability zone is specified in PreferredAv'
'ailabilityZones: http://docs.aws.amazon.com/'
'AWSCloudFormation/latest/UserGuide/aws-prope'
'rties-elasticache-cache-cluster.html#cfn-ela'
'sticache-cachecluster-azmode')
return True
class ParameterGroup(AWSObject):
resource_type = "AWS::ElastiCache::ParameterGroup"
props = {
'CacheParameterGroupFamily': (basestring, True),
'Description': (basestring, True),
'Properties': (dict, True),
}
class SecurityGroup(AWSObject):
resource_type = "AWS::ElastiCache::SecurityGroup"
props = {
'Description': (basestring, False),
}
class SecurityGroupIngress(AWSObject):
resource_type = "AWS::ElastiCache::SecurityGroupIngress"
props = {
'CacheSecurityGroupName': (basestring, True),
'EC2SecurityGroupName': (basestring, True),
'EC2SecurityGroupOwnerId': (basestring, False),
}
class SubnetGroup(AWSObject):
resource_type = "AWS::ElastiCache::SubnetGroup"
props = {
'CacheSubnetGroupName': (basestring, False),
'Description': (basestring, True),
'SubnetIds': (list, True),
}
class ReplicationGroup(AWSObject):
resource_type = "AWS::ElastiCache::ReplicationGroup"
props = {
'AtRestEncryptionEnabled': (boolean, False),
'AuthToken': (basestring, False),
'AutoMinorVersionUpgrade': (boolean, False),
'AutomaticFailoverEnabled': (boolean, False),
'CacheNodeType': (basestring, False),
'CacheParameterGroupName': (basestring, False),
'CacheSecurityGroupNames': ([basestring], False),
'CacheSubnetGroupName': (basestring, False),
'Engine': (basestring, False),
'EngineVersion': (basestring, False),
'KmsKeyId': (basestring, False),
'NodeGroupConfiguration': (list, False),
'NotificationTopicArn': (basestring, False),
'NumCacheClusters': (integer, False),
'NumNodeGroups': (integer, False),
'Port': (network_port, False),
'PreferredCacheClusterAZs': ([basestring], False),
'PreferredMaintenanceWindow': (basestring, False),
'PrimaryClusterId': (basestring, False),
'ReplicasPerNodeGroup': (integer, False),
'ReplicationGroupDescription': (basestring, True),
'ReplicationGroupId': (basestring, False),
'SecurityGroupIds': ([basestring], False),
'SnapshotArns': ([basestring], False),
'SnapshotName': (basestring, False),
'SnapshotRetentionLimit': (integer, False),
'SnapshottingClusterId': (basestring, False),
'SnapshotWindow': (basestring, False),
'Tags': (Tags, False),
'TransitEncryptionEnabled': (boolean, False),
}
def validate(self):
if 'NumCacheClusters' not in self.properties and \
'NumNodeGroups' not in self.properties and \
'ReplicasPerNodeGroup' not in self.properties and \
'PrimaryClusterId' not in self.properties:
raise ValueError(
'One of PrimaryClusterId, NumCacheClusters, '
'NumNodeGroups or ReplicasPerNodeGroup are required'
'in type AWS::ElastiCache::ReplicationGroup'
)
return True
class NodeGroupConfiguration(AWSProperty):
props = {
'NodeGroupId': (validate_node_group_id, False),
'PrimaryAvailabilityZone': (basestring, False),
'ReplicaAvailabilityZones': ([basestring], False),
'ReplicaCount': (integer, False),
'Slots': (basestring, False),
}
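# A minimal usage sketch (hypothetical values):
#     cache = CacheCluster(
#         "MyRedis",
#         CacheNodeType="cache.t2.micro",
#         Engine="redis",
#         NumCacheNodes=1,
#     )
#     print(cache.to_dict())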
| ikben/troposphere | troposphere/elasticache.py | Python | bsd-2-clause | 5,785 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
from st2common.models.db.trace import TraceDB, TraceComponentDB
from st2common.persistence.trace import Trace
from st2tests.base import CleanDbTestCase
class TraceDBTest(CleanDbTestCase):
def test_get(self):
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
action_executions=[str(bson.ObjectId()) for _ in range(4)],
rules=[str(bson.ObjectId()) for _ in range(4)],
trigger_instances=[str(bson.ObjectId()) for _ in range(5)])
retrieved = Trace.get(id=saved.id)
self.assertEquals(retrieved.id, saved.id, 'Incorrect trace retrieved.')
def test_query(self):
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
action_executions=[str(bson.ObjectId()) for _ in range(4)],
rules=[str(bson.ObjectId()) for _ in range(4)],
trigger_instances=[str(bson.ObjectId()) for _ in range(5)])
retrieved = Trace.query(trace_tag=saved.trace_tag)
self.assertEquals(len(retrieved), 1, 'Should have 1 trace.')
self.assertEquals(retrieved[0].id, saved.id, 'Incorrect trace retrieved.')
# Add another trace with same trace_tag and confirm that we support.
        # This is most likely an anti-pattern for the trace_tag but it is an unknown.
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
action_executions=[str(bson.ObjectId()) for _ in range(2)],
rules=[str(bson.ObjectId()) for _ in range(4)],
trigger_instances=[str(bson.ObjectId()) for _ in range(3)])
retrieved = Trace.query(trace_tag=saved.trace_tag)
self.assertEquals(len(retrieved), 2, 'Should have 2 traces.')
def test_update(self):
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
action_executions=[],
rules=[],
trigger_instances=[])
retrieved = Trace.query(trace_tag=saved.trace_tag)
self.assertEquals(len(retrieved), 1, 'Should have 1 trace.')
self.assertEquals(retrieved[0].id, saved.id, 'Incorrect trace retrieved.')
no_action_executions = 4
no_rules = 4
no_trigger_instances = 5
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
id_=retrieved[0].id,
action_executions=[str(bson.ObjectId()) for _ in range(no_action_executions)],
rules=[str(bson.ObjectId()) for _ in range(no_rules)],
trigger_instances=[str(bson.ObjectId()) for _ in range(no_trigger_instances)])
retrieved = Trace.query(trace_tag=saved.trace_tag)
self.assertEquals(len(retrieved), 1, 'Should have 1 trace.')
self.assertEquals(retrieved[0].id, saved.id, 'Incorrect trace retrieved.')
# validate update
self.assertEquals(len(retrieved[0].action_executions), no_action_executions,
'Failed to update action_executions.')
self.assertEquals(len(retrieved[0].rules), no_rules, 'Failed to update rules.')
self.assertEquals(len(retrieved[0].trigger_instances), no_trigger_instances,
'Failed to update trigger_instances.')
def test_update_via_list_push(self):
no_action_executions = 4
no_rules = 4
no_trigger_instances = 5
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
action_executions=[str(bson.ObjectId()) for _ in range(no_action_executions)],
rules=[str(bson.ObjectId()) for _ in range(no_rules)],
trigger_instances=[str(bson.ObjectId()) for _ in range(no_trigger_instances)])
# push updates
Trace.push_action_execution(
saved, action_execution=TraceComponentDB(object_id=str(bson.ObjectId())))
Trace.push_rule(saved, rule=TraceComponentDB(object_id=str(bson.ObjectId())))
Trace.push_trigger_instance(
saved, trigger_instance=TraceComponentDB(object_id=str(bson.ObjectId())))
retrieved = Trace.get(id=saved.id)
self.assertEquals(retrieved.id, saved.id, 'Incorrect trace retrieved.')
self.assertEquals(len(retrieved.action_executions), no_action_executions + 1)
self.assertEquals(len(retrieved.rules), no_rules + 1)
self.assertEquals(len(retrieved.trigger_instances), no_trigger_instances + 1)
def test_update_via_list_push_components(self):
no_action_executions = 4
no_rules = 4
no_trigger_instances = 5
saved = TraceDBTest._create_save_trace(
trace_tag='test_trace',
action_executions=[str(bson.ObjectId()) for _ in range(no_action_executions)],
rules=[str(bson.ObjectId()) for _ in range(no_rules)],
trigger_instances=[str(bson.ObjectId()) for _ in range(no_trigger_instances)])
retrieved = Trace.push_components(
saved,
action_executions=[TraceComponentDB(object_id=str(bson.ObjectId()))
for _ in range(no_action_executions)],
rules=[TraceComponentDB(object_id=str(bson.ObjectId()))
for _ in range(no_rules)],
trigger_instances=[TraceComponentDB(object_id=str(bson.ObjectId()))
for _ in range(no_trigger_instances)])
self.assertEquals(retrieved.id, saved.id, 'Incorrect trace retrieved.')
self.assertEquals(len(retrieved.action_executions), no_action_executions * 2)
self.assertEquals(len(retrieved.rules), no_rules * 2)
self.assertEquals(len(retrieved.trigger_instances), no_trigger_instances * 2)
@staticmethod
def _create_save_trace(trace_tag, id_=None, action_executions=None, rules=None,
trigger_instances=None):
if action_executions is None:
action_executions = []
action_executions = [TraceComponentDB(object_id=action_execution)
for action_execution in action_executions]
if rules is None:
rules = []
rules = [TraceComponentDB(object_id=rule) for rule in rules]
if trigger_instances is None:
trigger_instances = []
trigger_instances = [TraceComponentDB(object_id=trigger_instance)
for trigger_instance in trigger_instances]
created = TraceDB(id=id_,
trace_tag=trace_tag,
trigger_instances=trigger_instances,
rules=rules,
action_executions=action_executions)
return Trace.add_or_update(created)
| alfasin/st2 | st2common/tests/unit/test_db_trace.py | Python | apache-2.0 | 7,466 |
"""
sentry.templatetags.sentry_helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# XXX: Import django-paging's template tags so we don't have to worry about
# INSTALLED_APPS
from __future__ import absolute_import
import os.path
import pytz
from collections import namedtuple
from datetime import timedelta
from paging.helpers import paginate as paginate_func
from pkg_resources import parse_version as Version
from urllib import quote
from django import template
from django.conf import settings
from django.template import RequestContext
from django.template.defaultfilters import stringfilter
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
import six
from six.moves import range
from sentry import options
from sentry.constants import EVENTS_PER_PAGE
from sentry.models import Organization
from sentry.web.helpers import group_is_public
from sentry.utils import to_unicode
from sentry.utils.avatar import get_gravatar_url
from sentry.utils.http import absolute_uri
from sentry.utils.javascript import to_json
from sentry.utils.safe import safe_execute
from sentry.utils.strings import truncatechars
from templatetag_sugar.register import tag
from templatetag_sugar.parser import Name, Variable, Constant, Optional
SentryVersion = namedtuple('SentryVersion', ['current', 'latest',
'update_available'])
register = template.Library()
truncatechars = register.filter(stringfilter(truncatechars))
truncatechars.is_safe = True
register.filter(to_json)
register.simple_tag(absolute_uri)
@register.filter
def pprint(value, break_after=10):
"""
break_after is used to define how often a <span> is
inserted (for soft wrapping).
"""
value = to_unicode(value)
return mark_safe(u'<span></span>'.join(
[escape(value[i:(i + break_after)]) for i in range(0, len(value), break_after)]
))
@register.filter
def is_url(value):
if not isinstance(value, six.string_types):
return False
if not value.startswith(('http://', 'https://')):
return False
if ' ' in value:
return False
return True
# seriously Django?
@register.filter
def subtract(value, amount):
return int(value) - int(amount)
@register.filter
def has_charts(group):
from sentry.utils.db import has_charts
if hasattr(group, '_state'):
db = group._state.db or 'default'
else:
db = 'default'
return has_charts(db)
@register.filter
def as_sorted(value):
return sorted(value)
@register.filter
def small_count(v):
if not v:
return 0
z = [
(1000000000, _('b')),
(1000000, _('m')),
(1000, _('k')),
]
v = int(v)
for x, y in z:
o, p = divmod(v, x)
if o:
if len(str(o)) > 2 or not p:
return '%d%s' % (o, y)
return '%.1f%s' % (v / float(x), y)
return v
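# e.g. small_count(1200) renders as '1.2k' and small_count(2000000) as '2m'.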
@register.filter
def num_digits(value):
return len(str(value))
@register.filter
def to_str(data):
return str(data)
@register.filter
def is_none(value):
return value is None
@register.simple_tag(takes_context=True)
def get_sentry_version(context):
import sentry
current = sentry.VERSION
latest = options.get('sentry:latest_version') or current
update_available = Version(latest) > Version(current)
context['sentry_version'] = SentryVersion(
current, latest, update_available
)
return ''
@register.filter
def timesince(value, now=None):
from django.template.defaultfilters import timesince
if now is None:
now = timezone.now()
if not value:
return _('never')
if value < (now - timedelta(days=5)):
return value.date()
value = (' '.join(timesince(value, now).split(' ')[0:2])).strip(',')
if value == _('0 minutes'):
return _('just now')
if value == _('1 day'):
return _('yesterday')
return value + _(' ago')
@register.filter
def duration(value):
if not value:
return '0s'
# value is assumed to be in ms
value = value / 1000.0
hours, minutes, seconds = 0, 0, 0
if value > 3600:
hours = value / 3600
value = value % 3600
if value > 60:
minutes = value / 60
value = value % 60
seconds = value
output = []
if hours:
output.append('%dh' % hours)
if minutes:
output.append('%dm' % minutes)
if seconds > 1:
output.append('%0.2fs' % seconds)
elif seconds:
output.append('%dms' % (seconds * 1000))
return ''.join(output)
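# e.g. duration(3723000), i.e. 1h 2m 3s in ms, renders as '1h2m3.00s'.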
# XXX: this is taken from django-paging so that we may render
# a custom template, and not worry about INSTALLED_APPS
@tag(register, [Variable('queryset_or_list'),
Constant('from'), Variable('request'),
Optional([Constant('as'), Name('asvar')]),
Optional([Constant('per_page'), Variable('per_page')])])
def paginate(context, queryset_or_list, request, asvar=None, per_page=EVENTS_PER_PAGE):
"""{% paginate queryset_or_list from request as foo[ per_page 25] %}"""
result = paginate_func(request, queryset_or_list, per_page, endless=True)
context_instance = RequestContext(request)
paging = mark_safe(render_to_string('sentry/partial/_pager.html', result, context_instance))
result = dict(objects=result['paginator'].get('objects', []), paging=paging)
if asvar:
context[asvar] = result
return ''
return result
@tag(register, [Variable('queryset_or_list'),
Constant('from'), Variable('request'),
Optional([Constant('as'), Name('asvar')]),
Optional([Constant('per_page'), Variable('per_page')])])
def paginator(context, queryset_or_list, request, asvar=None, per_page=EVENTS_PER_PAGE):
"""{% paginator queryset_or_list from request as foo[ per_page 25] %}"""
result = paginate_func(request, queryset_or_list, per_page, endless=True)
if asvar:
context[asvar] = result
return ''
return result
@tag(register, [Constant('from'), Variable('request'),
Optional([Constant('without'), Name('withoutvar')]),
Optional([Constant('as'), Name('asvar')])])
def querystring(context, request, withoutvar, asvar=None):
params = request.GET.copy()
if withoutvar in params:
del params[withoutvar]
result = params.urlencode()
if asvar:
context[asvar] = result
return ''
return result
@register.inclusion_tag('sentry/partial/_form.html')
def render_form(form):
return {'form': form}
@register.filter
def as_bookmarks(group_list, user):
group_list = list(group_list)
if user.is_authenticated() and group_list:
project = group_list[0].project
bookmarks = set(project.bookmark_set.filter(
user=user,
group__in=group_list,
).values_list('group_id', flat=True))
else:
bookmarks = set()
for g in group_list:
yield g, g.pk in bookmarks
@register.filter
def is_bookmarked(group, user):
if user.is_authenticated():
return group.bookmark_set.filter(
user=user,
group=group,
).exists()
return False
@register.filter
def date(dt, arg=None):
from django.template.defaultfilters import date
if not timezone.is_aware(dt):
dt = dt.replace(tzinfo=timezone.utc)
return date(dt, arg)
@tag(register, [Constant('for'), Variable('user'),
Constant('from'), Variable('project'),
Constant('as'), Name('asvar')])
def get_project_dsn(context, user, project, asvar):
from sentry.models import ProjectKey
if not user.is_authenticated():
context[asvar] = None
return ''
try:
key = ProjectKey.objects.filter(project=project)[0]
except ProjectKey.DoesNotExist:
context[asvar] = None
else:
context[asvar] = key.get_dsn()
return ''
# Adapted from http://en.gravatar.com/site/implement/images/django/
# The "mm" default is for the grey, "mystery man" icon. See:
# http://en.gravatar.com/site/implement/images/
@tag(register, [Variable('email'),
Optional([Constant('size'), Variable('size')]),
Optional([Constant('default'), Variable('default')])])
def gravatar_url(context, email, size=None, default='mm'):
return get_gravatar_url(email, size, default)
@register.filter
def trim_schema(value):
return value.split('//', 1)[-1]
@register.filter
def with_metadata(group_list, request):
group_list = list(group_list)
if request.user.is_authenticated() and group_list:
project = group_list[0].project
bookmarks = set(project.bookmark_set.filter(
user=request.user,
group__in=group_list,
).values_list('group_id', flat=True))
else:
bookmarks = set()
# TODO(dcramer): this is obsolete and needs to pull from the tsdb backend
historical_data = {}
for g in group_list:
yield g, {
'is_bookmarked': g.pk in bookmarks,
'historical_data': ','.join(str(x[1]) for x in historical_data.get(g.id, [])),
}
@register.inclusion_tag('sentry/plugins/bases/tag/widget.html')
def render_tag_widget(group, tag):
cutoff = timezone.now() - timedelta(days=7)
return {
'title': tag['label'],
'tag_name': tag['key'],
'group': group,
}
@register.simple_tag
def percent(value, total):
if not (value and total):
return 0
return int(int(value) / float(total) * 100)
@register.filter
def titlize(value):
return value.replace('_', ' ').title()
@register.filter
def split(value, delim=''):
return value.split(delim)
@register.filter
def get_rendered_interfaces(event, request):
interface_list = []
is_public = group_is_public(event.group, request.user)
for interface in event.interfaces.itervalues():
html = safe_execute(interface.to_html, event, is_public=is_public)
if not html:
continue
interface_list.append((interface, mark_safe(html)))
return sorted(interface_list, key=lambda x: x[0].get_display_score(), reverse=True)
@register.inclusion_tag('sentry/partial/github_button.html')
def github_button(user, repo):
return {
'user': user,
'repo': repo,
}
@register.inclusion_tag('sentry/partial/data_values.html')
def render_values(value, threshold=5, collapse_to=3):
if isinstance(value, (list, tuple)):
value = list(enumerate(value))
is_list, is_dict = bool(value), True
else:
is_list, is_dict = False, isinstance(value, dict)
if is_dict:
value = sorted(value.iteritems())
context = {
'is_dict': is_dict,
'is_list': is_list,
'threshold': threshold,
'collapse_to': collapse_to,
}
if is_dict:
value_len = len(value)
over_threshold = value_len > threshold
if over_threshold:
context.update({
'over_threshold': over_threshold,
'hidden_values': value_len - collapse_to,
'value_before_expand': value[:collapse_to],
'value_after_expand': value[collapse_to:],
})
else:
context.update({
'over_threshold': over_threshold,
'hidden_values': 0,
'value_before_expand': value,
'value_after_expand': [],
})
else:
context['value'] = value
return context
@tag(register, [Constant('from'), Variable('project'),
Constant('as'), Name('asvar')])
def recent_alerts(context, project, asvar):
from sentry.models import Alert
context[asvar] = list(Alert.get_recent_for_project(project.id))
return ''
@register.filter
def urlquote(value, safe=''):
return quote(value.encode('utf8'), safe)
@register.filter
def basename(value):
return os.path.basename(value)
@register.filter
def user_display_name(user):
return user.first_name or user.username
@register.simple_tag(takes_context=True)
def localized_datetime(context, dt, format='DATETIME_FORMAT'):
request = context['request']
timezone = getattr(request, 'timezone', None)
if not timezone:
timezone = pytz.timezone(settings.SENTRY_DEFAULT_TIME_ZONE)
dt = dt.astimezone(timezone)
return date(dt, format)
@register.filter
def list_organizations(user):
return Organization.objects.get_for_user(user)
@register.filter
def needs_access_group_migration(user, organization):
from sentry.models import AccessGroup, OrganizationMember, OrganizationMemberType
has_org_access_queryset = OrganizationMember.objects.filter(
user=user,
organization=organization,
has_global_access=True,
type__lte=OrganizationMemberType.ADMIN,
)
if not (user.is_superuser or has_org_access_queryset.exists()):
return False
return AccessGroup.objects.filter(
team__organization=organization
).exists()
@register.filter
def count_pending_access_requests(organization):
from sentry.models import OrganizationAccessRequest
return OrganizationAccessRequest.objects.filter(
team__organization=organization,
).count()
| argonemyth/sentry | src/sentry/templatetags/sentry_helpers.py | Python | bsd-3-clause | 13,614 |
# coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Sequence, Text # noqa: F401
import attr
from pybatfish.util import escape_html, escape_name
from .primitives import DataModelElement, Edge
from .route import NextHop
__all__ = [
"ArpErrorStepDetail",
"DelegatedToNextVrf",
"DeliveredStepDetail",
"Discarded",
"EnterInputIfaceStepDetail",
"ExitOutputIfaceStepDetail",
"FilterStepDetail",
"ForwardedIntoVxlanTunnel",
"ForwardedOutInterface",
"ForwardingDetail",
"Flow",
"HeaderConstraints",
"Hop",
"InboundStepDetail",
"MatchSessionStepDetail",
"MatchTcpFlags",
"OriginateStepDetail",
"RoutingStepDetail",
"SetupSessionStepDetail",
"PathConstraints",
"TcpFlags",
"Trace",
"TransformationStepDetail",
]
def _optional_int(x):
# type: (Any) -> Optional[int]
if x is None:
return None
return int(x)
@attr.s(frozen=True)
class Flow(DataModelElement):
"""A concrete IPv4 flow.
Noteworthy attributes for flow inspection/filtering:
:ivar srcIP: Source IP of the flow
:ivar dstIP: Destination IP of the flow
:ivar srcPort: Source port of the flow
:ivar dstPort: Destination port of the flow
:ivar ipProtocol: the IP protocol of the flow either as its name (e.g., TCP) for well-known protocols or a string like UNNAMED_168
:ivar ingressNode: the node where the flow started (or entered the network)
:ivar ingressInterface: the interface name where the flow started (or entered the network)
:ivar ingressVrf: the VRF name where the flow started (or entered the network)
"""
dscp = attr.ib(type=int, converter=int)
dstIp = attr.ib(type=str, converter=str)
dstPort = attr.ib(type=Optional[int], converter=_optional_int)
ecn = attr.ib(type=int, converter=int)
fragmentOffset = attr.ib(type=int, converter=int)
icmpCode = attr.ib(type=Optional[int], converter=_optional_int)
icmpVar = attr.ib(type=Optional[int], converter=_optional_int)
ingressInterface = attr.ib(type=Optional[str])
ingressNode = attr.ib(type=Optional[str])
ingressVrf = attr.ib(type=Optional[str])
ipProtocol = attr.ib(type=str)
packetLength = attr.ib(type=str)
srcIp = attr.ib(type=str, converter=str)
srcPort = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsAck = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsCwr = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsEce = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsFin = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsPsh = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsRst = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsSyn = attr.ib(type=Optional[int], converter=_optional_int)
tcpFlagsUrg = attr.ib(type=Optional[int], converter=_optional_int)
IP_PROTOCOL_PATTERN = re.compile("^UNNAMED_([0-9]+)$", flags=re.IGNORECASE)
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "Flow":
return Flow(
json_dict["dscp"],
json_dict["dstIp"],
json_dict.get("dstPort"),
json_dict["ecn"],
json_dict["fragmentOffset"],
json_dict.get("icmpCode"),
json_dict.get("icmpVar"),
json_dict.get("ingressInterface"),
json_dict.get("ingressNode"),
json_dict.get("ingressVrf"),
json_dict["ipProtocol"],
json_dict["packetLength"],
json_dict["srcIp"],
json_dict.get("srcPort"),
json_dict.get("tcpFlagsAck"),
json_dict.get("tcpFlagsCwr"),
json_dict.get("tcpFlagsEce"),
json_dict.get("tcpFlagsFin"),
json_dict.get("tcpFlagsPsh"),
json_dict.get("tcpFlagsRst"),
json_dict.get("tcpFlagsSyn"),
json_dict.get("tcpFlagsUrg"),
)
def __str__(self):
# type: () -> str
iface_str = self._iface_str()
vrf_str = self._vrf_str()
return (
"start={node}{iface}{vrf} [{src}->{dst}"
" {ip_proto}{dscp}{ecn}{offset}{length}]".format(
node=self.ingressNode,
iface=iface_str,
vrf=vrf_str,
src=self._ip_port(self.srcIp, self.srcPort),
dst=self._ip_port(self.dstIp, self.dstPort),
ip_proto=self.get_ip_protocol_str(),
dscp=(" dscp={}".format(self.dscp) if self.dscp != 0 else ""),
ecn=(" ecn={}".format(self.ecn) if self.ecn != 0 else ""),
offset=(
" fragmentOffset={}".format(self.fragmentOffset)
if self.fragmentOffset != 0
else ""
),
length=(
" length={}".format(self.packetLength)
if self.packetLength != 512 # Batfish default
else ""
),
)
)
def _vrf_str(self):
vrf_str = (
" vrf={}".format(self.ingressVrf)
if self.ingressVrf not in ["default", None]
else ""
)
return vrf_str
def _iface_str(self):
iface_str = (
" interface={}".format(self.ingressInterface)
if self.ingressInterface is not None
else ""
)
return iface_str
def get_flag_str(self):
# type: () -> str
"""
Returns a print friendly version of all set TCP flags.
"""
flags = []
# ordering heuristics: common flags first, common combinations (SYN-ACK, FIN-ACK) print nicely
if self.tcpFlagsSyn:
flags.append("SYN")
if self.tcpFlagsFin:
flags.append("FIN")
if self.tcpFlagsAck:
flags.append("ACK")
if self.tcpFlagsRst:
flags.append("RST")
if self.tcpFlagsCwr:
flags.append("CWR")
if self.tcpFlagsEce:
flags.append("ECE")
if self.tcpFlagsPsh:
flags.append("PSH")
if self.tcpFlagsUrg:
flags.append("URG")
return "-".join(flags) if len(flags) > 0 else "no flags set"
def get_ip_protocol_str(self):
# type: () -> str
"""Returns a print-friendly version of IP protocol and any protocol-specific information (e.g., flags for TCP, type/code for ICMP."""
match = self.IP_PROTOCOL_PATTERN.match(self.ipProtocol)
if match:
return "ipProtocol=" + match.group(1)
if self.ipProtocol.lower() == "tcp":
return "TCP ({})".format(self.get_flag_str())
if self.ipProtocol.lower() == "icmp":
return "ICMP (type={}, code={})".format(self.icmpVar, self.icmpCode)
return self.ipProtocol
def _has_ports(self):
# type: () -> bool
return (
self.ipProtocol in ["TCP", "UDP", "DCCP", "SCTP"]
and self.srcPort is not None
and self.dstPort is not None
)
def _repr_html_(self):
# type: () -> str
return "<br>".join(self._repr_html_lines())
def _repr_html_lines(self):
# type: () -> List[str]
lines = []
lines.append(
"Start Location: {node}{iface}{vrf}".format(
node=self.ingressNode, iface=self._iface_str(), vrf=self._vrf_str()
)
)
lines.append("Src IP: %s" % self.srcIp)
if self._has_ports():
assert self.srcPort is not None
lines.append("Src Port: %d" % self.srcPort)
lines.append("Dst IP: %s" % self.dstIp)
if self._has_ports():
assert self.dstPort is not None
lines.append("Dst Port: %d" % self.dstPort)
lines.append("IP Protocol: %s" % self.get_ip_protocol_str())
if self.dscp != 0:
lines.append("DSCP: %s" % self.dscp)
if self.ecn != 0:
lines.append("ECN: %s" % self.ecn)
if self.fragmentOffset != 0:
lines.append("Fragment Offset: %d" % self.fragmentOffset)
if self.packetLength != 512:
lines.append("Packet Length: %s" % self.packetLength)
return lines
def _ip_port(self, ip, port):
# type: (str, Optional[int]) -> str
if self._has_ports():
assert port is not None
return "{ip}:{port}".format(ip=ip, port=port)
else:
return ip
@attr.s(frozen=True)
class FlowDiff(DataModelElement):
"""A difference between two Flows.
:ivar fieldName: A Flow field name that has changed.
:ivar oldValue: The old value of the field.
:ivar newValue: The new value of the field.
"""
fieldName = attr.ib(type=str)
oldValue = attr.ib(type=str)
newValue = attr.ib(type=str)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> FlowDiff
return FlowDiff(
json_dict["fieldName"], json_dict["oldValue"], json_dict["newValue"]
)
def __str__(self):
# type: () -> str
return "{fieldName}: {oldValue} -> {newValue}".format(
fieldName=self.fieldName, oldValue=self.oldValue, newValue=self.newValue
)
@attr.s(frozen=True)
class FlowTrace(DataModelElement):
"""A trace of a flow through the network.
A flowTrace is a combination of hops and flow fate (i.e., disposition).
:ivar disposition: Flow disposition
:ivar hops: A list of hops (:py:class:`FlowTraceHop`) the flow took
:ivar notes: Additional notes that help explain the disposition, if applicable.
"""
disposition = attr.ib(type=str)
hops = attr.ib(type=Sequence)
notes = attr.ib(type=Any)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> FlowTrace
return FlowTrace(
json_dict["disposition"],
[FlowTraceHop.from_dict(hop) for hop in json_dict.get("hops", [])],
json_dict.get("notes"),
)
def __str__(self):
# type: () -> str
return "{hops}\n{notes}".format(
hops="\n".join(
["{} {}".format(num, hop) for num, hop in enumerate(self.hops, start=1)]
),
notes=self.notes,
)
def __len__(self):
return len(self.hops)
def __getitem__(self, item):
return self.hops[item]
def _repr_html_(self):
# type: () -> str
return "{notes}<br>{hops}".format(
notes=self.format_notes_html(),
hops="<br><br>".join(
[
"<strong>{num}</strong> {hop}".format(
num=num, hop=hop._repr_html_()
)
for num, hop in enumerate(self.hops, start=1)
]
),
)
def format_notes_html(self):
# type: () -> str
return '<span style="color:{color}; text-weight:bold;">{notes}</span>'.format(
color=_get_color_for_disposition(self.disposition),
notes=escape_html(self.notes),
)
@attr.s(frozen=True)
class FlowTraceHop(DataModelElement):
"""A single hop in a flow trace.
:ivar edge: The :py:class:`~Edge` identifying the hop/link
:ivar routes: The routes which caused this hop
:ivar transformedFlow: The transformed version of the flow (if NAT is present)
"""
edge = attr.ib(type=Edge)
routes = attr.ib(type=List[Any])
transformedFlow = attr.ib(type=Optional[Flow])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> FlowTraceHop
transformed_flow = json_dict.get("transformedFlow")
return FlowTraceHop(
Edge.from_dict(json_dict["edge"]),
list(json_dict.get("routes", [])),
Flow.from_dict(transformed_flow) if transformed_flow else None,
)
def __str__(self):
# type: () -> str
ret_str = "{}\n Route(s):\n {}".format(
self.edge, "\n ".join(self.routes)
)
if self.transformedFlow:
ret_str += "\n Transformed flow: {}".format(self.transformedFlow)
return ret_str
def _repr_html_(self):
# type: () -> str
indent = " " * 4
result = "{edge}<br>Route(s):<br>{routes}".format(
edge=self.edge._repr_html_(),
routes=indent
+ ("<br>" + indent).join([escape_html(r) for r in self.routes]),
)
if self.transformedFlow:
result += "<br>Transformed flow: {}".format(
self.transformedFlow._repr_html_()
)
return result
class SessionAction(DataModelElement):
"""An action that a firewall session takes for return traffic matching the session."""
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> SessionAction
action = json_dict.get("type")
if action == "Accept":
return Accept()
if action == "PreNatFibLookup":
return PreNatFibLookup()
if action == "PostNatFibLookup" or action == "FibLookup":
# action == "FibLookup" supported for backwards compatibility
return PostNatFibLookup()
if action == "ForwardOutInterface":
return ForwardOutInterface.from_dict(json_dict)
raise ValueError("Invalid session action type: {}".format(action))
@attr.s(frozen=True)
class Accept(SessionAction):
"""A SessionAction whereby return traffic is accepted by the node from which it
originated.
"""
def __str__(self):
# type: () -> str
return "Accept"
@attr.s(frozen=True)
class PreNatFibLookup(SessionAction):
"""A SessionAction whereby return traffic is forwarded according to the result of a lookup
on the FIB of the interface on which the return traffic is received before NAT is applied.
"""
def __str__(self):
# type: () -> str
return "PreNatFibLookup"
@attr.s(frozen=True)
class PostNatFibLookup(SessionAction):
"""A SessionAction whereby return traffic is forwarded according to the result of a lookup
on the FIB of the interface on which the return traffic is received after NAT is applied.
"""
def __str__(self):
# type: () -> str
return "PostNatFibLookup"
@attr.s(frozen=True)
class ForwardOutInterface(SessionAction):
"""A SessionAction whereby a return flow is forwarded out a specified interface to a
specified next hop with neither FIB resolution nor ARP lookup.
:ivar nextHopHostname: Hostname of the next hop
:ivar nextHopInterface: Interface that the next hop receives
:ivar outgoingInterface: Interface of the outgoing traffic from this hop
"""
nextHopHostname = attr.ib(type=str)
nextHopInterface = attr.ib(type=str)
outgoingInterface = attr.ib(type=str)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> ForwardOutInterface
next_hop = json_dict.get("nextHop", {})
return ForwardOutInterface(
next_hop.get("hostname", ""),
next_hop.get("interface", ""),
json_dict.get("outgoingInterface", ""),
)
def __str__(self):
# type: () -> str
return "ForwardOutInterface(Next Hop: {}, Next Hop Interface: {}, Outgoing Interface: {})".format(
self.nextHopHostname, self.nextHopInterface, self.outgoingInterface
)
@attr.s(frozen=True)
class SessionMatchExpr(DataModelElement):
"""
Represents a match criteria for a firewall session.
:ivar ipProtocol: IP protocol of the flow
:ivar srcIp: Source IP of the flow
:ivar dstIp: Destination IP of the flow
:ivar srcPort: Source port of the flow
:ivar dstPort: Destination port of the flow
"""
ipProtocol = attr.ib(type=str)
srcIp = attr.ib(type=str)
dstIp = attr.ib(type=str)
srcPort = attr.ib(type=Optional[int], default=None)
dstPort = attr.ib(type=Optional[int], default=None)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> SessionMatchExpr
return SessionMatchExpr(
json_dict.get("ipProtocol", ""),
json_dict.get("srcIp", ""),
json_dict.get("dstIp", ""),
json_dict.get("srcPort"),
json_dict.get("dstPort"),
)
def __str__(self):
# type: () -> str
matchers = ["ipProtocol", "srcIp", "dstIp"]
if self.srcPort is not None and self.dstPort is not None:
matchers.extend(["srcPort", "dstPort"])
strings = ["{}={}".format(field, getattr(self, field)) for field in matchers]
return "[{}]".format(", ".join(strings))
class SessionScope(DataModelElement):
"""
Represents the scope of a firewall session.
"""
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> SessionScope
if "incomingInterfaces" in json_dict:
return IncomingSessionScope.from_dict(json_dict)
elif "originatingVrf" in json_dict:
return OriginatingSessionScope.from_dict(json_dict)
raise ValueError("Invalid session scope: {}".format(json_dict))
@attr.s(frozen=True)
class IncomingSessionScope(SessionScope):
"""
Represents scope of a firewall session established by traffic leaving specific interfaces.
:ivar incomingInterfaces: Interfaces where exiting traffic can cause a session to be established
"""
incomingInterfaces = attr.ib(type=List[str])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> IncomingSessionScope
return IncomingSessionScope(json_dict.get("incomingInterfaces", ""))
def __str__(self):
# type: () -> str
return "Incoming Interfaces: [{}]".format(", ".join(self.incomingInterfaces))
@attr.s(frozen=True)
class OriginatingSessionScope(SessionScope):
"""
Represents scope of a firewall session established by traffic accepted into a specific VRF.
:ivar originatingVrf: VRF where accepted traffic can cause a session to be established
"""
originatingVrf = attr.ib(type=str)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> OriginatingSessionScope
return OriginatingSessionScope(json_dict.get("originatingVrf", ""))
def __str__(self):
# type: () -> str
return "Originating VRF: {}".format(self.originatingVrf)
@attr.s(frozen=True)
class ArpErrorStepDetail(DataModelElement):
"""Details of a step representing the arp error of a flow when sending out of a Hop.
:ivar outputInterface: Interface of the Hop from which the flow exits
:ivar resolvedNexthopIp: Resolve next hop Ip address
"""
outputInterface = attr.ib(type=Optional[str])
resolvedNexthopIp = attr.ib(type=Optional[str])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> ArpErrorStepDetail
return ArpErrorStepDetail(
json_dict.get("outputInterface", {}).get("interface"),
json_dict.get("resolvedNexthopIp"),
)
def __str__(self):
# type: () -> str
detail_info = []
if self.outputInterface:
detail_info.append("Output Interface: {}".format(self.outputInterface))
if self.resolvedNexthopIp:
detail_info.append(
"Resolved Next Hop IP: {}".format(self.resolvedNexthopIp)
)
return ", ".join(detail_info)
@attr.s(frozen=True)
class DeliveredStepDetail(DataModelElement):
"""Details of a step representing the flow is delivered or exiting the network.
:ivar outputInterface: Interface of the Hop from which the flow exits
:ivar resolvedNexthopIp: Resolve next hop Ip address
"""
outputInterface = attr.ib(type=Optional[str])
resolvedNexthopIp = attr.ib(type=Optional[str])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> DeliveredStepDetail
return DeliveredStepDetail(
json_dict.get("outputInterface", {}).get("interface"),
json_dict.get("resolvedNexthopIp"),
)
def __str__(self):
# type: () -> str
detail_info = []
if self.outputInterface:
detail_info.append("Output Interface: {}".format(self.outputInterface))
if self.resolvedNexthopIp:
detail_info.append(
"Resolved Next Hop IP: {}".format(self.resolvedNexthopIp)
)
return ", ".join(detail_info)
@attr.s(frozen=True)
class EnterInputIfaceStepDetail(DataModelElement):
"""Details of a step representing the entering of a flow into a Hop.
:ivar inputInterface: Interface of the Hop on which this flow enters
:ivar inputVrf: VRF associated with the input interface
"""
inputInterface = attr.ib(type=str)
inputVrf = attr.ib(type=Optional[str])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> EnterInputIfaceStepDetail
return EnterInputIfaceStepDetail(
json_dict.get("inputInterface", {}).get("interface"),
json_dict.get("inputVrf"),
)
def __str__(self):
# type: () -> str
str_output = str(self.inputInterface)
return str_output
@attr.s(frozen=True)
class ExitOutputIfaceStepDetail(DataModelElement):
"""Details of a step representing the exiting of a flow out of a Hop.
:ivar outputInterface: Interface of the Hop from which the flow exits
:ivar transformedFlow: Transformed Flow if a source NAT was applied on the Flow
"""
outputInterface = attr.ib(type=str)
transformedFlow = attr.ib(type=Optional[str])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> ExitOutputIfaceStepDetail
return ExitOutputIfaceStepDetail(
json_dict.get("outputInterface", {}).get("interface"),
json_dict.get("transformedFlow"),
)
def __str__(self):
# type: () -> str
return str(self.outputInterface)
@attr.s(frozen=True)
class InboundStepDetail(DataModelElement):
"""Details of a step representing the receiving (acceptance) of a flow into a Hop.
:ivar interface: interface that owns the destination IP
"""
interface = attr.ib(type=str)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> InboundStepDetail
return InboundStepDetail(json_dict.get("interface", ""))
def __str__(self):
return str(self.interface)
@attr.s(frozen=True)
class LoopStepDetail(DataModelElement):
"""Details of a step representing a forwarding loop being detected."""
@classmethod
def from_dict(cls, json_dict):
return LoopStepDetail()
def __str__(self):
return ""
@attr.s(frozen=True)
class MatchSessionStepDetail(DataModelElement):
"""Details of a step for when a flow matches a firewall session.
:ivar sessionScope: Scope of flows session can match (incoming interfaces or originating VRF)
:ivar sessionAction: A SessionAction that the firewall takes for a matching session
:ivar matchCriteria: A SessionMatchExpr that describes the match criteria of the session
:ivar transformation: List of FlowDiffs that will be applied after session match
"""
sessionScope = attr.ib(type=SessionScope)
sessionAction = attr.ib(type=SessionAction)
matchCriteria = attr.ib(type=SessionMatchExpr)
transformation = attr.ib(type=Optional[List[FlowDiff]], factory=list)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> MatchSessionStepDetail
# backward compatibility: if sessionScope is missing, look for
# incomingInterfaces instead
if "sessionScope" in json_dict:
sessionScope = SessionScope.from_dict(json_dict.get("sessionScope", {}))
else:
sessionScope = IncomingSessionScope.from_dict(json_dict)
return MatchSessionStepDetail(
sessionScope,
SessionAction.from_dict(json_dict.get("sessionAction", {})),
SessionMatchExpr.from_dict(json_dict.get("matchCriteria", {})),
[FlowDiff.from_dict(diff) for diff in json_dict.get("transformation", [])],
)
def __str__(self):
# type: () -> str
strings = [
"{}".format(self.sessionScope),
"Action: {}".format(self.sessionAction),
"Match Criteria: {}".format(self.matchCriteria),
]
if self.transformation:
strings.append(
"Transformation: [{}]".format(", ".join(map(str, self.transformation)))
)
return ", ".join(strings)
class ForwardingDetail(DataModelElement, metaclass=ABCMeta):
def _repr_html_(self) -> str:
return escape_html(str(self))
@abstractmethod
def __str__(self) -> str:
raise NotImplementedError("ForwardingDetail elements must implement __str__")
@classmethod
def from_dict(cls, json_dict: Dict) -> "ForwardingDetail":
if "type" not in json_dict:
raise ValueError(
"Unknown type of ForwardingDetail, missing the type property in: {}".format(
json.dumps(json_dict)
)
)
fd_type = json_dict["type"]
if fd_type == "DelegatedToNextVrf":
return DelegatedToNextVrf.from_dict(json_dict)
elif fd_type == "ForwardedIntoVxlanTunnel":
return ForwardedIntoVxlanTunnel.from_dict(json_dict)
elif fd_type == "ForwardedOutInterface":
return ForwardedOutInterface.from_dict(json_dict)
elif fd_type == "Discarded":
return Discarded.from_dict(json_dict)
else:
raise ValueError(
"Unhandled ForwardingDetail type: {} in: {}".format(
json.dumps(fd_type), json.dumps(json_dict)
)
)
@attr.s(frozen=True)
class DelegatedToNextVrf(ForwardingDetail):
"""A flow being delegated to a different VRF for further processing."""
nextVrf = attr.ib(type=str)
type = attr.ib(type=str, default="DelegatedToNextVrf")
@type.validator
def check(self, _attribute, value):
if value != "DelegatedToNextVrf":
raise ValueError('type must be "DelegatedToNextVrf"')
def __str__(self) -> str:
return "Delegated to next VRF: {}".format(escape_name(self.nextVrf))
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "DelegatedToNextVrf":
assert set(json_dict.keys()) == {"type", "nextVrf"}
assert json_dict["type"] == "DelegatedToNextVrf"
next_vrf = json_dict["nextVrf"]
assert isinstance(next_vrf, str)
return DelegatedToNextVrf(next_vrf)
@attr.s(frozen=True)
class ForwardedIntoVxlanTunnel(ForwardingDetail):
"""A flow being forwarded into a VXLAN tunnel."""
vni = attr.ib(type=int)
vtep = attr.ib(type=str)
type = attr.ib(type=str, default="ForwardedIntoVxlanTunnel")
@type.validator
def check(self, _attribute, value):
if value != "ForwardedIntoVxlanTunnel":
raise ValueError('type must be "ForwardedIntoVxlanTunnel"')
def __str__(self) -> str:
return "Forwarded into VXLAN tunnel with VNI: {vni} and VTEP: {vtep}".format(
vni=self.vni, vtep=self.vtep
)
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "ForwardedIntoVxlanTunnel":
assert set(json_dict.keys()) == {"type", "vni", "vtep"}
assert json_dict["type"] == "ForwardedIntoVxlanTunnel"
vni = json_dict["vni"]
vtep = json_dict["vtep"]
assert isinstance(vni, int)
assert isinstance(vtep, str)
return ForwardedIntoVxlanTunnel(vni, vtep)
@attr.s(frozen=True)
class ForwardedOutInterface(ForwardingDetail):
"""A flow being forwarded out an interface.
If there is no resolved next-hop IP and this is the final step on this node, the destination IP of the flow will be used as the next gateway IP."""
outputInterface = attr.ib(type=str)
resolvedNextHopIp = attr.ib(type=Optional[str], default=None)
type = attr.ib(type=str, default="ForwardedOutInterface")
@type.validator
def check(self, _attribute, value):
if value != "ForwardedOutInterface":
raise ValueError('type must be "ForwardedOutInterface"')
def __str__(self) -> str:
return (
"Forwarded out interface: {iface} with resolved next-hop IP: {nhip}".format(
iface=escape_name(self.outputInterface), nhip=self.resolvedNextHopIp
)
if self.resolvedNextHopIp
else "Forwarded out interface: {iface}".format(
iface=escape_name(self.outputInterface)
)
)
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "ForwardedOutInterface":
assert (
set(json_dict.keys())
== {
"type",
"outputInterface",
"resolvedNextHopIp",
}
or set(json_dict.keys()) == {"type", "outputInterface"}
)
assert json_dict["type"] == "ForwardedOutInterface"
output_interface = json_dict["outputInterface"]
resolved_next_hop_ip = None
assert isinstance(output_interface, str)
if "resolvedNextHopIp" in json_dict:
resolved_next_hop_ip = json_dict["resolvedNextHopIp"]
assert resolved_next_hop_ip is None or isinstance(resolved_next_hop_ip, str)
return ForwardedOutInterface(output_interface, resolved_next_hop_ip)
@attr.s(frozen=True)
class Discarded(ForwardingDetail):
"""A flow being discarded."""
type = attr.ib(type=str, default="Discarded")
@type.validator
def check(self, _attribute, value):
if value != "Discarded":
raise ValueError('type must be "Discarded"')
def __str__(self) -> str:
return "Discarded"
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "Discarded":
assert json_dict == {"type": "Discarded"}
return Discarded()
@attr.s(frozen=True)
class OriginateStepDetail(DataModelElement):
"""Details of a step representing the originating of a flow in a Hop.
:ivar originatingVrf: VRF from which the Flow originates
"""
originatingVrf = attr.ib(type=str)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> OriginateStepDetail
return OriginateStepDetail(json_dict.get("originatingVrf", ""))
def __str__(self):
# type: () -> str
return str(self.originatingVrf)
@attr.s(frozen=True)
class RouteInfo(DataModelElement):
"""Contains information about the routes which led to the selection of the forwarding action for the ExitOutputIfaceStep"""
protocol = attr.ib(type=str)
network = attr.ib(type=str)
# TODO: make nextHop mandatory after sufficient period
nextHop = attr.ib(type=Optional[NextHop])
# nextHopIp populated only in absence of nextHop
# TODO: remove nextHopIp after sufficient period
nextHopIp = attr.ib(type=Optional[str])
admin = attr.ib(type=int)
metric = attr.ib(type=int)
def _old_str(self) -> str:
return "{protocol} (Network: {network}, Next Hop IP:{next_hop_ip})".format(
protocol=self.protocol,
network=self.network,
next_hop_ip=self.nextHopIp,
)
def __str__(self) -> str:
if not self.nextHop:
return self._old_str()
return "{protocol} (Network: {network}, Next Hop: {next_hop})".format(
protocol=self.protocol,
network=self.network,
next_hop=str(self.nextHop),
)
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "RouteInfo":
assert set(json_dict.keys()) - {"nextHop", "nextHopIp", "nextVrf"} == {
"protocol",
"network",
"admin",
"metric",
}
protocol = json_dict.get("protocol")
assert isinstance(protocol, str)
network = json_dict.get("network")
assert isinstance(network, str)
next_hop = None
next_hop_ip = None
if "nextHop" in json_dict:
next_hop_dict = json_dict.get("nextHop")
assert isinstance(next_hop_dict, Dict)
next_hop = NextHop.from_dict(next_hop_dict)
else:
# legacy
assert "nextHopIp" in json_dict
next_hop_ip = json_dict.get("nextHopIp")
assert isinstance(next_hop_ip, str)
admin = json_dict.get("admin")
assert isinstance(admin, int)
metric = json_dict.get("metric")
assert isinstance(metric, int)
return RouteInfo(protocol, network, next_hop, next_hop_ip, admin, metric)
@attr.s(frozen=True)
class RoutingStepDetail(DataModelElement):
"""Details of a step representing the routing from input interface to output interface.
:ivar routes: List of routes which were considered to select the forwarding action
"""
routes = attr.ib(type=List[RouteInfo])
# TODO: make forwardingDetail mandatory after sufficient period
forwardingDetail = attr.ib(type=Optional[ForwardingDetail])
# TODO: remove arpIp after sufficient period
arpIp = attr.ib(type=Optional[str])
# TODO: remove outputInteface after sufficient period
outputInterface = attr.ib(type=Optional[str])
@classmethod
def from_dict(cls, json_dict: Dict) -> "RoutingStepDetail":
routes = []
routes_json_list = json_dict.get("routes", [])
assert isinstance(routes_json_list, List)
for route_json in routes_json_list:
assert isinstance(route_json, Dict)
routes.append(RouteInfo.from_dict(route_json))
forwarding_detail = None
if "forwardingDetail" in json_dict:
forwarding_detail_json = json_dict.get("forwardingDetail")
assert isinstance(forwarding_detail_json, Dict)
forwarding_detail = ForwardingDetail.from_dict(forwarding_detail_json)
arp_ip = json_dict.get("arpIp")
if arp_ip is not None:
assert isinstance(arp_ip, str)
output_interface = json_dict.get("outputInterface")
if output_interface is not None:
assert isinstance(output_interface, str)
return RoutingStepDetail(
routes,
forwarding_detail,
arp_ip,
output_interface,
)
def _old_str(self) -> str:
output = []
if self.arpIp is not None:
output.append("ARP IP: " + self.arpIp)
if self.outputInterface is not None:
output.append("Output Interface: " + self.outputInterface)
if self.routes:
output.append(
"Routes: " + "[" + ",".join([str(route) for route in self.routes]) + "]"
)
return ", ".join(output)
def __str__(self) -> str:
if not self.forwardingDetail:
return self._old_str()
output = [str(self.forwardingDetail)]
if self.routes:
output.append(
"Routes: " + "[" + ",".join([str(route) for route in self.routes]) + "]"
)
return ", ".join(output)
@attr.s(frozen=True)
class SetupSessionStepDetail(DataModelElement):
"""Details of a step for when a firewall session is created.
:ivar sessionScope: Scope of flows session can match (incoming interfaces or originating VRF)
:ivar sessionAction: A SessionAction that the firewall takes for a return traffic matching the session
:ivar matchCriteria: A SessionMatchExpr that describes the match criteria of the session
:ivar transformation: List of FlowDiffs that will be applied on the return traffic matching the session
"""
sessionScope = attr.ib(type=SessionScope)
sessionAction = attr.ib(type=SessionAction)
matchCriteria = attr.ib(type=SessionMatchExpr)
transformation = attr.ib(type=Optional[List[FlowDiff]], factory=list)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> SetupSessionStepDetail
# backward compatibility: if sessionScope is missing, look for
# incomingInterfaces instead
if "sessionScope" in json_dict:
sessionScope = SessionScope.from_dict(json_dict.get("sessionScope", {}))
else:
sessionScope = IncomingSessionScope.from_dict(json_dict)
return SetupSessionStepDetail(
sessionScope,
SessionAction.from_dict(json_dict.get("sessionAction", {})),
SessionMatchExpr.from_dict(json_dict.get("matchCriteria", {})),
[FlowDiff.from_dict(diff) for diff in json_dict.get("transformation", [])],
)
def __str__(self):
# type: () -> str
strings = [
"{}".format(self.sessionScope),
"Action: {}".format(self.sessionAction),
"Match Criteria: {}".format(self.matchCriteria),
]
if self.transformation:
strings.append(
"Transformation: [{}]".format(", ".join(map(str, self.transformation)))
)
return ", ".join(strings)
@attr.s(frozen=True)
class FilterStepDetail(DataModelElement):
"""Details of a step representing a filter step.
:ivar filter: filter name
    :ivar filterType: filter type
:ivar inputInterface: input interface of the flow
:ivar flow: current flow
"""
filter = attr.ib(type=str)
filterType = attr.ib(type=str)
inputInterface = attr.ib(type=str)
flow = attr.ib(type=Optional[Flow])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> FilterStepDetail
flowObj = json_dict.get("flow", {})
return FilterStepDetail(
json_dict.get("filter", ""),
json_dict.get("type", ""),
json_dict.get("inputInterface", ""),
Flow.from_dict(flowObj) if flowObj else None,
)
def __str__(self):
# type: () -> str
return "{} ({})".format(self.filter, self.filterType)
@attr.s(frozen=True)
class PolicyStepDetail(DataModelElement):
"""Details of a step representing a generic policy processing step
(e.g., PBR or equivalent).
:ivar policy: policy name
"""
policy = attr.ib(type=str)
@classmethod
def from_dict(cls, json_dict: Dict[str, Any]) -> "PolicyStepDetail":
return PolicyStepDetail(json_dict.get("policy", ""))
def __str__(self) -> str:
return "{}".format(self.policy)
@attr.s(frozen=True)
class TransformationStepDetail(DataModelElement):
"""Details of a step representation a packet transformation.
:ivar transformationType: The type of the transformation
:ivar flowDiffs: Set of changed flow fields
"""
transformationType = attr.ib(type=str)
flowDiffs = attr.ib(type=List[FlowDiff])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> TransformationStepDetail
return TransformationStepDetail(
json_dict["transformationType"],
[FlowDiff.from_dict(fd) for fd in json_dict.get("flowDiffs", [])],
)
def __str__(self):
# type: () -> str
if not self.flowDiffs:
return self.transformationType
return "{type} {diffs}".format(
type=self.transformationType,
diffs=", ".join(str(flowDiff) for flowDiff in self.flowDiffs),
)
@attr.s(frozen=True)
class Step(DataModelElement):
"""Represents a step in a hop.
:ivar detail: Details about the step
:ivar action: Action taken in this step
"""
detail = attr.ib(type=Any)
action = attr.ib(type=str, converter=str)
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> Optional[Step]
from_dicts = {
"ArpError": ArpErrorStepDetail.from_dict,
"Delivered": DeliveredStepDetail.from_dict,
"EnterInputInterface": EnterInputIfaceStepDetail.from_dict,
"ExitOutputInterface": ExitOutputIfaceStepDetail.from_dict,
"Inbound": InboundStepDetail.from_dict,
"Loop": LoopStepDetail.from_dict,
"MatchSession": MatchSessionStepDetail.from_dict,
"Originate": OriginateStepDetail.from_dict,
"Routing": RoutingStepDetail.from_dict,
"SetupSession": SetupSessionStepDetail.from_dict,
"Transformation": TransformationStepDetail.from_dict,
"Policy": PolicyStepDetail.from_dict,
"Filter": FilterStepDetail.from_dict,
}
action = json_dict.get("action")
detail = json_dict.get("detail", {})
type = json_dict.get("type")
if type not in from_dicts:
return None
else:
return Step(from_dicts[type](detail), action)
def __str__(self):
# type: () -> str
action_str = str(self.action)
detail_str = str(self.detail) if self.detail else None
if detail_str:
return "{}({})".format(action_str, detail_str)
else:
return action_str
def _repr_html_(self):
# type: () -> str
return str(self)
@attr.s(frozen=True)
class Hop(DataModelElement):
"""A single hop in a flow trace.
:ivar node: Name of node considered as the Hop
:ivar steps: List of steps taken at this Hop
"""
node = attr.ib(type=str)
steps = attr.ib(type=List[Step])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> Hop
steps = [] # type: List[Step]
for step in json_dict["steps"]:
step_obj = Step.from_dict(step)
if step_obj is not None:
steps.append(step_obj)
return Hop(json_dict.get("node", {}).get("name"), steps)
def __len__(self):
return len(self.steps)
def __getitem__(self, item):
return self.steps[item]
def __str__(self):
# type: () -> str
return "node: {node}\n {steps}".format(
node=self.node, steps="\n ".join(map(str, self.steps))
)
def _repr_html_(self):
# type: () -> str
return "node: {node}<br> {steps}".format(
node=self.node,
steps="<br> ".join([step._repr_html_() for step in self.steps]),
)
@staticmethod
def _get_routes_data(routes):
# type: (List[Dict]) -> List[str]
routes_str = [] # type: List[str]
for route in routes:
routes_str.append(
"{protocol} [Network: {network}, Next Hop IP:{next_hop_ip}]".format(
protocol=route.get("protocol"),
network=route.get("network"),
next_hop_ip=route.get("nextHopIp"),
)
)
return routes_str
@attr.s(frozen=True)
class Trace(DataModelElement):
"""A trace of a flow through the network.
A Trace is a combination of hops and flow fate (i.e., disposition).
:ivar disposition: Flow disposition
:ivar hops: A list of hops (:py:class:`Hop`) the flow took
"""
disposition = attr.ib(type=str)
hops = attr.ib(type=List[Hop])
@classmethod
def from_dict(cls, json_dict):
# type: (Dict) -> Trace
return Trace(
json_dict["disposition"],
[Hop.from_dict(hop) for hop in json_dict.get("hops", [])],
)
def __len__(self):
return len(self.hops)
def __getitem__(self, item):
return self.hops[item]
def __str__(self):
# type: () -> str
return "{disposition}\n{hops}".format(
disposition=self.disposition,
hops="\n".join(
[
"{num}. {hop}".format(num=num, hop=hop)
for num, hop in enumerate(self.hops, start=1)
]
),
)
def _repr_html_(self):
# type: () -> str
disposition_span = '<span style="color:{color}; text-weight:bold;">{disposition}</span>'.format(
color=_get_color_for_disposition(self.disposition),
disposition=self.disposition,
)
return "{disposition_span}<br>{hops}".format(
disposition_span=disposition_span,
hops="<br>".join(
[
"<strong>{num}</strong>. {hop}".format(
num=num, hop=hop._repr_html_()
)
for num, hop in enumerate(self.hops, start=1)
]
),
)
@attr.s(frozen=True)
class TcpFlags(DataModelElement):
"""
Represents a set of TCP flags in a packet.
:ivar ack:
:ivar cwr:
:ivar ece:
:ivar fin:
:ivar psh:
:ivar rst:
:ivar syn:
:ivar urg:
"""
ack = attr.ib(default=False, type=bool)
cwr = attr.ib(default=False, type=bool)
ece = attr.ib(default=False, type=bool)
fin = attr.ib(default=False, type=bool)
psh = attr.ib(default=False, type=bool)
rst = attr.ib(default=False, type=bool)
syn = attr.ib(default=False, type=bool)
urg = attr.ib(default=False, type=bool)
@classmethod
def from_dict(cls, json_dict):
return TcpFlags(
ack=json_dict["ack"],
cwr=json_dict["cwr"],
ece=json_dict["ece"],
fin=json_dict["fin"],
psh=json_dict["psh"],
rst=json_dict["rst"],
syn=json_dict["syn"],
urg=json_dict["urg"],
)
@attr.s(frozen=True)
class MatchTcpFlags(DataModelElement):
"""
Match given :py:class:`TcpFlags`.
For each bit in the TCP flags, a `useX`
must be set to true, otherwise the bit is treated as "don't care".
:ivar tcpFlags: tcp flags to match
:ivar useAck:
:ivar useCwr:
:ivar useEce:
:ivar useFin:
:ivar usePsh:
:ivar useRst:
:ivar useSyn:
:ivar useUrg:
"""
tcpFlags = attr.ib(type=TcpFlags)
useAck = attr.ib(default=True, type=bool)
useCwr = attr.ib(default=True, type=bool)
useEce = attr.ib(default=True, type=bool)
useFin = attr.ib(default=True, type=bool)
usePsh = attr.ib(default=True, type=bool)
useRst = attr.ib(default=True, type=bool)
useSyn = attr.ib(default=True, type=bool)
useUrg = attr.ib(default=True, type=bool)
@classmethod
def from_dict(cls, json_dict):
return MatchTcpFlags(
TcpFlags.from_dict(json_dict["tcpFlags"]),
json_dict["useAck"],
json_dict["useCwr"],
json_dict["useEce"],
json_dict["useFin"],
json_dict["usePsh"],
json_dict["useRst"],
json_dict["useSyn"],
json_dict["useUrg"],
)
@staticmethod
def match_ack():
# type: () -> MatchTcpFlags
"""Return match conditions checking that ACK bit is set.
Other bits may take any value.
"""
return MatchTcpFlags(TcpFlags(ack=True), useAck=True)
@staticmethod
def match_rst():
# type: () -> MatchTcpFlags
"""Return match conditions checking that RST bit is set.
Other bits may take any value.
"""
return MatchTcpFlags(TcpFlags(rst=True), useRst=True)
@staticmethod
def match_syn():
# type: () -> MatchTcpFlags
"""Return match conditions checking that the SYN bit is set.
Other bits may take any value.
"""
return MatchTcpFlags(TcpFlags(syn=True), useSyn=True)
@staticmethod
def match_synack():
# type: () -> MatchTcpFlags
"""Return match conditions checking that both the SYN and ACK bits are set.
Other bits may take any value.
"""
return MatchTcpFlags(TcpFlags(ack=True, syn=True), useAck=True, useSyn=True)
@staticmethod
def match_established():
# type: () -> List[MatchTcpFlags]
"""Return a list of match conditions matching an established flow (ACK or RST bit set).
Other bits may take any value.
"""
return [MatchTcpFlags.match_ack(), MatchTcpFlags.match_rst()]
@staticmethod
def match_not_established():
# type: () -> List[MatchTcpFlags]
"""Return a list of match conditions matching a non-established flow.
Meaning both ACK and RST bits are unset.
Other bits may take any value.
"""
return [
MatchTcpFlags(
useAck=True, useRst=True, tcpFlags=TcpFlags(ack=False, rst=False)
)
]
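# Illustrative sketch (the scenario is made up): the helpers above compose
# with HeaderConstraints to match, e.g., only SYN+ACK packets to a host:
#   hc = HeaderConstraints(
#       dstIps="10.0.0.1",
#       ipProtocols=["TCP"],
#       tcpFlags=MatchTcpFlags.match_synack(),  # SYN+ACK set, rest "don't care"
#   )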
def _get_color_for_disposition(disposition):
# type: (str) -> str
success_dispositions = {"ACCEPTED", "DELIVERED_TO_SUBNET", "EXITS_NETWORK"}
if disposition in success_dispositions:
return "#019612"
else:
return "#7c020e"
def _normalize_phc_intspace(value):
# type: (Any) -> Optional[Text]
if value is None or isinstance(value, str):
return value
if isinstance(value, int):
return str(value)
if isinstance(value, Iterable):
result = ",".join(str(v) for v in value)
return result
raise ValueError("Invalid value {}".format(value))
def _normalize_phc_list(value):
# type: (Any) -> Optional[List[Text]]
if value is None or isinstance(value, list):
return value
elif isinstance(value, str):
# only collect truthy values
alist = [v for v in [v.strip() for v in value.split(",")] if v]
if not alist:
# reject empty list values
raise ValueError("Invalid value {}".format(value))
return alist
raise ValueError("Invalid value {}".format(value))
def _normalize_phc_tcpflags(value):
# type: (Any) -> Optional[List[MatchTcpFlags]]
if value is None or isinstance(value, list):
return value
elif isinstance(value, MatchTcpFlags):
return [value]
raise ValueError("Invalid value {}".format(value))
def _normalize_phc_strings(value):
# type: (Any) -> Optional[Text]
if value is None or isinstance(value, str):
return value
if isinstance(value, Iterable):
result = ",".join(value) # type: Text
return result
raise ValueError("Invalid value {}".format(value))
@attr.s(frozen=True)
class HeaderConstraints(DataModelElement):
"""Constraints on an IPv4 packet header space.
Specify constraints on packet headers by specifying lists of allowed values
in each field of IP packet.
:ivar srcIps: Source location/IP
:vartype srcIps: str
:ivar dstIps: Destination location/IP
:vartype dstIps: str
:ivar srcPorts: Source ports as list of ranges (e.g., ``"22,53-99"``)
:ivar dstPorts: Destination ports as list of ranges, (e.g., ``"22,53-99"``)
:ivar applications: Shorthands for application protocols (e.g., ``SSH``, ``DNS``, ``SNMP``)
:ivar ipProtocols: List of well-known IP protocols (e.g., ``TCP``, ``UDP``, ``ICMP``)
:ivar icmpCodes: List of integer ICMP codes
:ivar icmpTypes: List of integer ICMP types
:ivar dscps: List of allowed DSCP value ranges
:ivar ecns: List of allowed ECN values ranges
:ivar packetLengths: List of allowed packet length value ranges
:ivar fragmentOffsets: List of allowed fragmentOffset value ranges
:ivar tcpFlags: List of :py:class:`MatchTcpFlags` -- conditions on which
TCP flags to match
Lists of values in each fields are subject to a logical "OR":
>>> HeaderConstraints(ipProtocols=["TCP", "UDP"])
HeaderConstraints(srcIps=None, dstIps=None, srcPorts=None, dstPorts=None, ipProtocols=['TCP', 'UDP'], applications=None,
icmpCodes=None, icmpTypes=None, ecns=None, dscps=None, packetLengths=None, fragmentOffsets=None, tcpFlags=None)
means allow TCP OR UDP.
Different fields are ANDed together:
>>> HeaderConstraints(srcIps="1.1.1.1", dstIps="2.2.2.2", applications=["SSH"])
HeaderConstraints(srcIps='1.1.1.1', dstIps='2.2.2.2', srcPorts=None, dstPorts=None, ipProtocols=None, applications=['SSH'],
icmpCodes=None, icmpTypes=None, ecns=None, dscps=None, packetLengths=None, fragmentOffsets=None, tcpFlags=None)
means an SSH connection originating at ``1.1.1.1`` and going to ``2.2.2.2``
Any ``None`` values will be treated as unconstrained.
"""
# Order params in likelihood of specification
srcIps = attr.ib(default=None, type=Optional[str])
dstIps = attr.ib(default=None, type=Optional[str])
srcPorts = attr.ib(
default=None, type=Optional[str], converter=_normalize_phc_intspace
)
dstPorts = attr.ib(
default=None, type=Optional[str], converter=_normalize_phc_intspace
)
ipProtocols = attr.ib(
default=None, type=Optional[List[str]], converter=_normalize_phc_list
)
applications = attr.ib(
default=None, type=Optional[List[str]], converter=_normalize_phc_list
)
icmpCodes = attr.ib(
default=None, type=Optional[str], converter=_normalize_phc_intspace
)
icmpTypes = attr.ib(
default=None, type=Optional[str], converter=_normalize_phc_intspace
)
ecns = attr.ib(default=None, type=Optional[str], converter=_normalize_phc_intspace)
dscps = attr.ib(default=None, type=Optional[str], converter=_normalize_phc_intspace)
packetLengths = attr.ib(
default=None, type=Optional[str], converter=_normalize_phc_intspace
)
fragmentOffsets = attr.ib(
default=None, type=Optional[str], converter=_normalize_phc_intspace
)
tcpFlags = attr.ib(
default=None, type=Optional[MatchTcpFlags], converter=_normalize_phc_tcpflags
)
@classmethod
def from_dict(cls, json_dict):
return HeaderConstraints(
srcIps=json_dict.get("srcIps"),
dstIps=json_dict.get("dstIps"),
srcPorts=json_dict.get("srcPorts"),
dstPorts=json_dict.get("dstPorts"),
ipProtocols=json_dict.get("ipProtocols"),
applications=json_dict.get("applications"),
icmpCodes=json_dict.get("icmpCodes"),
icmpTypes=json_dict.get("icmpTypes"),
ecns=json_dict.get("ecns"),
dscps=json_dict.get("dscps"),
packetLengths=json_dict.get("packetLengths"),
fragmentOffsets=json_dict.get("fragmentOffsets"),
)
@classmethod
def of(cls, flow):
# type: (Flow) -> HeaderConstraints
"""Create header constraints from an existing flow."""
srcPorts = dstPorts = icmpCodes = icmpTypes = tcpFlags = None
if flow._has_ports():
srcPorts = str(flow.srcPort)
dstPorts = str(flow.dstPort)
if flow.ipProtocol.lower() == "icmp":
icmpCodes = flow.icmpCode
icmpTypes = flow.icmpVar
if flow.ipProtocol.lower() == "tcp":
tcpFlags = MatchTcpFlags(
tcpFlags=TcpFlags(
bool(flow.tcpFlagsAck),
bool(flow.tcpFlagsCwr),
bool(flow.tcpFlagsEce),
bool(flow.tcpFlagsFin),
bool(flow.tcpFlagsPsh),
bool(flow.tcpFlagsRst),
bool(flow.tcpFlagsSyn),
bool(flow.tcpFlagsUrg),
)
)
return HeaderConstraints(
srcIps=flow.srcIp,
dstIps=flow.dstIp,
ipProtocols=[str(flow.ipProtocol)],
srcPorts=srcPorts,
dstPorts=dstPorts,
icmpCodes=icmpCodes,
icmpTypes=icmpTypes,
tcpFlags=tcpFlags,
fragmentOffsets=flow.fragmentOffset,
packetLengths=flow.packetLength,
)
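# Illustrative: given a concrete `flow` (e.g. taken from a Trace), build
# constraints that reproduce that flow in a follow-up query (hypothetical use):
#   hc = HeaderConstraints.of(flow)
#   hc.srcIps == flow.srcIp  # True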
@attr.s(frozen=True)
class PathConstraints(DataModelElement):
"""
Constraints on the path of a flow.
:ivar startLocation: Location specification for where a flow is allowed to start
:ivar endLocation: Node specification for where a flow is allowed to terminate
:ivar transitLocations: Node specification for where a flow must transit
:ivar forbiddenLocations: Node specification for where a flow is *not* allowed to transit
"""
startLocation = attr.ib(default=None, type=Optional[str])
endLocation = attr.ib(default=None, type=Optional[str])
transitLocations = attr.ib(default=None, type=Optional[str])
forbiddenLocations = attr.ib(default=None, type=Optional[str])
@classmethod
def from_dict(cls, json_dict):
return PathConstraints(
startLocation=json_dict.get("startLocation"),
endLocation=json_dict.get("endLocation"),
transitLocations=json_dict.get("transitLocations"),
forbiddenLocations=json_dict.get("forbiddenLocations"),
)
| batfish/pybatfish | pybatfish/datamodel/flow.py | Python | apache-2.0 | 57,636 |
#-------------------------------------------------------------------------------
# Name: IENA
# Purpose: Class to pack and unpack IENA packets
#
# Author: DCollins
#
# Created: 19/12/2013
#
# Copyright 2014 Diarmuid Collins
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import struct
import datetime,time
def unpack48(x):
x2, x3 = struct.unpack('>HI', x)
return x3 | (x2 << 32)
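# Worked example (illustrative): 6 bytes, big-endian 16-bit high word followed
# by a 32-bit low word:
#   unpack48(b'\x00\x01\x00\x00\x00\x02') == (1 << 32) | 2 == 4294967298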
class IENA ():
    '''Class to pack and unpack IENA(tm) payloads. IENA(tm) is a proprietary payload format
developed by Airbus for use in FTI networks. It is usually transmitted in a UDP packet
containing parameter data acquired from sensors and buses'''
IENA_HEADER_FORMAT = '>HHHIBBH'
IENA_HEADER_LENGTH = struct.calcsize(IENA_HEADER_FORMAT)
TRAILER_LENGTH = 2
def __init__(self):
'''Constructor class for an IENA payload'''
        self.key = None # known as ienaky
""":type : int"""
self.size = None
""":type : int"""
self.timeusec = None
""":type : int"""
self.keystatus = None
""":type : int"""
self.status = None
""":type : int"""
self.sequence = None
""":type : int"""
self.endfield = 0xdead
""":type : int"""
self.payload = None #string containing payload
""":type : str"""
self._packetStrut = struct.Struct(IENA.IENA_HEADER_FORMAT)
# only calculate this once
self._startOfYear = datetime.datetime(datetime.datetime.today().year, 1, 1, 0, 0, 0,0)
self.lengthError = False # Flag to verify the buffer length
def unpack(self,buf,ExceptionOnLengthError=False):
'''Unpack a raw byte stream to an IENA object
Accepts a buffer to unpack as the required argument
:type buf: str
:type ExceptionOnLengthError: bool
'''
# Some checking
if len(buf) < IENA.IENA_HEADER_LENGTH:
raise ValueError("Buffer passed to unpack is too small to be an IENA packet")
(self.key, self.size, timehi, timelo, self.keystatus, self.status, self.sequence) = self._packetStrut.unpack_from(buf)
self.timeusec = timelo | (timehi << 32)
        if self.size*2 != len(buf):
            self.lengthError = True
            if ExceptionOnLengthError:
                raise ValueError("IENA size field does not match the buffer length")
self.payload = buf[IENA.IENA_HEADER_LENGTH:-2]
(self.endfield,) = struct.unpack(">H",buf[-2:]) # last two bytes are the trailer
    def pack(self):
        '''Pack the IENA payload into a binary format
        :rtype: str
        '''
        # validate first so a missing field raises ValueError rather than a
        # TypeError from the time arithmetic below
        for required_field in [self.key,self.timeusec,self.keystatus,self.status,self.sequence,self.endfield,self.payload]:
            if required_field is None:
                raise ValueError("A required field in the IENA packet is not defined")
        timehi = self.timeusec >> 32
        timelo = self.timeusec % 0x100000000
        self.size = int((len(self.payload) + IENA.IENA_HEADER_LENGTH + IENA.TRAILER_LENGTH)/2) # size is in words
packetvalues = (self.key,self.size,timehi,timelo,self.keystatus,self.status,self.sequence)
packet = self._packetStrut.pack(*packetvalues) + self.payload + struct.pack('>H',self.endfield)
return packet
def _getPacketTime(self):
'''Return the Packet time in standard UNIX time
:rtype: int
'''
return int(self.timeusec/1e6 + time.mktime(self._startOfYear.timetuple()))
    def setPacketTime(self,utctimestamp,microseconds=0):
        '''Set the packet timestamp
        :type utctimestamp: int
        :type microseconds: int
        '''
seconds_this_year = utctimestamp - int(time.mktime(self._startOfYear.timetuple()))
packettime = microseconds + int(seconds_this_year)*1000000
self.timeusec = packettime
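# Minimal usage sketch (illustrative values; assumes a bytes payload):
#   import time
#   iena = IENA()
#   iena.key = 0xDC
#   iena.keystatus = 0
#   iena.status = 0
#   iena.sequence = 1
#   iena.payload = b'\x00\x01\x02\x03'
#   iena.setPacketTime(int(time.time()))  # timeusec counted from start of year
#   buf = iena.pack()
#   rx = IENA()
#   rx.unpack(buf)
#   assert rx.sequence == 1 and rx.payload == iena.payload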
| douglaskastle/AcraNetwork | AcraNetwork/IENA.py | Python | gpl-2.0 | 4,674 |
from .secret import *
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + TESTING_APPS + CUSTOM_APPS + DEBUG_APPS
DEBUG = True
TEMPLATE_DEBUG = DEBUG
| cpscanarias/ssfinder-back-end | ssfinder_back_end/ssfinder_back_end/projects_settings/local.py | Python | gpl-3.0 | 151 |
#chunk_size = 64
freq_table = {'Xeon 3.2Ghz': 3.2*1024*1024*1024,
'Xeon 3.0Ghz': 3.0*1024*1024*1024,
'Xeon 2.5GHz L5420': 2.5*1000*1000*1000,
'2.4Ghz': 2.3*1024*1024*1024,
'2.3Ghz': 2.3*1024*1024*1024,
'2.0Ghz': 2.0*1024*1024*1024,
'1.8Ghz': 1.5*1024*1024*1024,
'1.6Ghz': 1.5*1024*1024*1024,
}
#read_bw_table = {'Seagate': 280.0*1024*1024,
read_bw_table = {'drive1': 280.0*1024*1024,
'drive2': 270.0*1024*1024,
'drive3': 260.0*1024*1024,
'drive4': 250.0*1024*1024,
'drive5': 270.0*1024*1024,
'drive6': 260.0*1024*1024,
'drive7': 250.0*1024*1024
}
#write_bw_table = {'Seagate': 75.0*1024*1024,
write_bw_table = {'drive1': 75.0*1024*1024,
'drive2': 70.0*1024*1024,
'drive3': 65.0*1024*1024,
'drive4': 60.0*1024*1024,
'drive5': 65.0*1024*1024,
'drive6': 60.0*1024*1024,
'drive7': 65.0*1024*1024
}
int_bw = '1Gb'
int_latency = '0.15ms'
ext_bw = '1Gb'
ext_latency = '0.15ms'
| toomanyjoes/mrperfcs386m | internetTest/hadoop_conf.py | Python | mit | 952 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads variable size batches of data from a data set and stores read data.
`VariableBatchReader` reads variable size data from a dataset.
`CachedDataReader` on top of `VariableBatchReader` adds functionality to store
the read batch for use in the next session.run() call.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow.compat.v1 as tf
def _slice_data(stored_data, size):
return [data[:size] for data in stored_data]
class VariableBatchReader(object):
"""Read data of varying batch sizes from a data set."""
def __init__(self, dataset, max_batch_size):
"""Initializes class.
Args:
dataset: List of Tensors representing the dataset, shuffled, repeated,
and batched into mini-batches of size at least `max_batch_size`. In
other words it should be reshuffled at each session.run call. This can
be done with the tf.data package using the construction demonstrated in
load_mnist() function in examples/autoencoder_auto_damping.py.
max_batch_size: `int`. Maximum batch size of the data that can be
retrieved from the data set.
"""
self._dataset = dataset
self._max_batch_size = max_batch_size
def __call__(self, batch_size):
"""Reads `batch_size` data.
Args:
batch_size: Tensor of type `int32`, batch size of the data to be
retrieved from the dataset. `batch_size` should be less than or
equal to `max_batch_size`.
Returns:
Read data, An iterable of tensors with batch size equal to `batch_size`.
"""
check_size = tf.assert_less_equal(
batch_size,
tf.convert_to_tensor(self._max_batch_size, dtype=tf.int32),
message='Data set read failure, Batch size greater than max allowed.'
)
with tf.control_dependencies([check_size]):
return _slice_data(self._dataset, batch_size)
class CachedDataReader(VariableBatchReader):
"""Provides functionality to store variable batch size data."""
def __init__(self, dataset, max_batch_size):
"""Initializes class and creates variables for storing previous batch.
Args:
dataset: List of Tensors representing the dataset, shuffled, repeated,
and batched into mini-batches of size at least `max_batch_size`. In
other words it should be reshuffled at each session.run call. This can
be done with the tf.data package using the construction demonstrated in
load_mnist() function in examples/autoencoder_auto_damping.py.
max_batch_size: `int`. Maximum batch size of the data that can be
retrieved from the data set.
"""
super(CachedDataReader, self).__init__(dataset, max_batch_size)
with tf.variable_scope('cached_data_reader'):
self._cached_batch_storage = [
tf.get_variable(
name='{}{}'.format('cached_batch_storage_', i),
shape=[max_batch_size]+ var.shape.as_list()[1:],
dtype=var.dtype,
trainable=False,
use_resource=True) for i, var in enumerate(self._dataset)
]
self._cached_batch_size = tf.get_variable(
name='cached_batch_size', shape=(), dtype=tf.int32, trainable=False,
use_resource=True)
self._cached_batch = _slice_data(self._cached_batch_storage,
self._cached_batch_size)
def __call__(self, batch_size):
"""Reads `batch_size` data and stores the read batch.
Args:
batch_size: Tensor of type `int32`, batch size of the data to be
retrieved from the dataset. `batch_size` should be less than or
equal to `max_batch_size`.
Returns:
Read data, An iterable of tensors with batch size equal to `batch_size`.
"""
sliced_data = super(CachedDataReader, self).__call__(batch_size)
# We need to make sure we read the cached batch before we update it!
with tf.control_dependencies(self._cached_batch):
batch_size_assign_op = self._cached_batch_size.assign(batch_size)
data_assign_ops = [
        prev[:batch_size].assign(cur)  # slice assignment on a resource variable updates it in place
for prev, cur in zip(self._cached_batch_storage, sliced_data)
]
with tf.control_dependencies(data_assign_ops + [batch_size_assign_op]):
return [tf.identity(sdata) for sdata in sliced_data]
@property
def cached_batch(self):
return self._cached_batch
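# ---------------------------------------------------------------------------
# Usage sketch (an illustration, not part of the original library): how
# CachedDataReader might be driven from a TF1 session. The tensors, sizes,
# and names below are assumptions made purely for exposition.
#   images = tf.random.uniform([64, 784])
#   labels = tf.random.uniform([64], maxval=10, dtype=tf.int32)
#   reader = CachedDataReader([images, labels], max_batch_size=64)
#   batch = reader(tf.constant(32, dtype=tf.int32))
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     sess.run(batch)                # reads and caches a 32-example batch
#     sess.run(reader.cached_batch)  # replays the batch cached above
# ---------------------------------------------------------------------------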
| tensorflow/kfac | kfac/python/ops/kfac_utils/data_reader.py | Python | apache-2.0 | 5,162 |
from django.contrib import admin
from .models import Message
# Register your models here.
admin.site.register(Message)
| lyubomir1993/AlohaServer | admin.py | Python | apache-2.0 | 121 |
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ShopifyAccount(ProviderAccount):
pass
class ShopifyProvider(OAuth2Provider):
id = 'shopify'
name = 'Shopify'
account_class = ShopifyAccount
def get_auth_params(self, request, action):
ret = super(ShopifyProvider, self).get_auth_params(request, action)
shop = request.GET.get('shop', None)
if shop:
ret.update({'shop': shop})
return ret
def get_default_scope(self):
return ['read_orders', 'read_products']
def extract_uid(self, data):
return str(data['shop']['id'])
def extract_common_fields(self, data):
# See: https://docs.shopify.com/api/shop
# User is only available with Shopify Plus, email is the only
# common field
return dict(email=data['shop']['email'])
providers.registry.register(ShopifyProvider)
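# Configuration sketch (an assumption based on the usual django-allauth
# pattern, not taken from this file): the default scope above can typically
# be overridden from settings.py, e.g.
#   SOCIALACCOUNT_PROVIDERS = {
#       'shopify': {
#           'SCOPE': ['read_orders', 'read_products', 'write_products'],
#       },
#   }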
| wli/django-allauth | allauth/socialaccount/providers/shopify/provider.py | Python | mit | 1,032 |
import os, olefile, re, tempfile
from joblib import Parallel, delayed
from datetime import date
def create_tsv(di, file):
    fields = (di['name'].decode(), di['prod code'].decode(), di['sku'].decode(),
              di['density'].decode(), di['sku_letter'].decode(),
              di['form_factor'].decode(), di['pba'].decode(),
              di['save time'], di['path'])
    file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % fields)
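# Columns written by create_tsv, in order (mirroring the format string above):
# name, prod code, sku, density, sku_letter, form_factor, pba, save time, path.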
def process_bigdata(di: dict) -> list:
s = (re.sub(b'\\r|\\n', b'', di['big data'])).split(b')')
del di['big data']
s = [x for x in s if b'XXX' in x]
lst = []
for item in s:
temp = {'name': di['name'], 'prod code': di['prod code'], 'path': di['path'], 'save time': di['save time']}
if b'=' in item:
b = item.partition(b'=')
j = b[2].lstrip()
temp['pba'] = j + b')'
temp['sku_letter'] = (b[0][-3:]).strip(b'- ')
j = (j[j.find(b'(')+1:])
a = re.search(b'[\\d.]{2,4}[\\s]*[GT][\\s]*[Bb]*[\\s]*', j)
try:
k = re.sub(b'[GgBbTt\\s]', b'', j[a.start():a.end()])
if b'.' in k:
temp['density'] = (k.replace(b'.', b'P'))[0:]
else:
temp['density'] = k
temp['form_factor'] = j.replace(j[a.start():a.end()], b'')
except AttributeError:
temp['density'] = b''
temp['form_factor'] = j
temp['sku'] = temp['density'] + temp['sku_letter']
elif b'Alternative' in item:
temp['pba'] = item.lstrip() + b')'
temp['sku_letter'] = b'Alternate'
temp['density'] = b'Alternate'
temp['sku'] = b'Alternate'
temp['form_factor'] = b'Alternate'
lst.append(temp)
return lst
def proper_reg(temp_text_path: str):
"""Finds the name and prod code. Big data contains the other essential information. Big data is processed by process."""
with open(temp_text_path, 'r+b') as f:
text = f.read()
try:
dit = {}
boolean = True
if "2d label spec" in temp_text_path.lower():
i = text.find(b'PP= PRODUCT IDENTIFIER') + 22
else:
i = text.find(b'PP = Product Identifier')+23
boolean = False
regex = re.compile(b'\\s*=\\s*')
j = regex.search(text, i)
if abs(i-j.start()) <= 3:
dit['prod code'] = text[i:i+3].strip()
regex = re.compile(b'\\r|\\n')
dit['name'] = text[j.end(): regex.search(text, j.end()).start()].strip().title()
else:
dit['prod code'] = text[j.end():j.end()+2].strip()
dit['name'] = text[i: j.start()].strip().title()
if boolean:
i = text.find(b'PBA number', j.end()) + 11
dit['big data'] = text[i:text.find(b'Human readable', i)-2]
if b'human' in dit['name']:
dit['name'] = dit['name'][:dit['name'].find(b'human')]
else:
i = text.find(b'PBA#)', j.end()) + 5
dit['big data'] = text[i:text.find(b'CC', i) - 2]
return dit
    except Exception:
        print(temp_text_path.split('/')[-1], 'is formatted irregularly and was not processed')
def format_doc(enter: str, tempdir: str):
"""Opens a .doc path, creates a temp text file and directory, and writes only the alphanumeric and punctuation characters to the file."""
ole = olefile.OleFileIO(enter)
txt = ole.openstream('WordDocument').read()
temp_txt_path = tempdir + "\\" + enter.split('/')[-1] + ".txt"
with open(temp_txt_path, 'w+b') as f:
f.write(re.sub(b'[^\\x20-\\x7F\\r]', b'', txt[txt.find(b'REV'): txt.rfind(b'Barcode')]))
return temp_txt_path
def extract_metadata(path: str) -> dict:
"""Finds the path and last saved time for each doc."""
di = {}
ole = olefile.OleFileIO(path)
meta = ole.get_metadata()
di['path'] = path
di['save time'] = date(meta.last_saved_time.year, meta.last_saved_time.month, meta.last_saved_time.day)
return di
def rev(prod: list, revis: str):
    """Keeps only the highest-revision document per product.
    `revis` is the marker (e.g. 'rev') that precedes the two-digit revision
    number in each path; lower-revision duplicates are removed from `prod`
    in place."""
try:
i = 0
r = len(prod)
while i < r:
r = len(prod)
g = i + 1
tup = prod[i].partition(revis)
rev1 = int(tup[2].lstrip()[0:2])
a = True
while g < r:
r = len(prod)
uptup = prod[g].partition(revis)
uprev = int(uptup[2].lstrip()[0:2])
if tup[0].rstrip().rsplit(' ', 1)[1] == uptup[0].rstrip().rsplit(' ', 1)[1]:
if rev1 > uprev:
prod.remove(prod[g])
elif uprev > rev1:
prod.remove(prod[i])
a = False
break
else:
g += 1
else:
g += 1
if a:
i += 1
except IndexError:
print(prod, i, g, r)
def find_duplicates(parallel, tempdir, *arg) -> list:
"""First for loop creates a dictionary for each PBA for all the doc paths and appends it to final_set. Second half deletes duplicate dictionaries."""
final_set = []
for prod_doc in arg:
if len(prod_doc) > 0:
rev(prod_doc, "rev")
meta = parallel(delayed(extract_metadata)(enter) for enter in prod_doc)
prod_doc = parallel(delayed(format_doc)(enter, tempdir) for enter in prod_doc)
if len(prod_doc) >= 1:
prod_doc = parallel(delayed(proper_reg)(enter) for enter in prod_doc)
for i in range(len(meta)):
prod_doc[i] = {**prod_doc[i], **meta[i]}
prod_doc = parallel(delayed(process_bigdata)(di) for di in prod_doc)
for lst in prod_doc:
for di in lst:
final_set.append(di)
r = len(final_set)
f = 0
while f < r:
r = len(final_set)
g = f+1
di = final_set[f]
temp = {'prod code': di['prod code'], 'sku': di['sku'], 'density': di['density'], 'pba': di['pba'].partition(b'(')[0]}
while g < r:
temp2 = {'prod code': final_set[g]['prod code'], 'sku': final_set[g]['sku'], 'density': final_set[g]['density'], 'pba': final_set[g]['pba'].partition(b'(')[0]}
if temp == temp2:
final_set.remove(final_set[g])
r = len(final_set)
else:
g += 1
f += 1
return final_set
def sort_list(doc_name: str, *vart):
"""Checks if a string contains the identifiers in vart. If it does return the initial string, if not return None."""
for var in vart:
if var.lower() in doc_name.lower() and "(old" not in doc_name and "~" not in doc_name:
return doc_name
def find_docs(path: str, vart: tuple = ()) -> list:
"""Scans a directory for .doc files with the identifiers in vart. Calls itself to scan subdirectories."""
doc_list = []
for sub_entry in os.scandir(path):
if sub_entry.is_dir():
if len(vart) != 0:
for var in vart:
if var.lower() in sub_entry.name.lower():
doc_list += (find_docs(path + "/" + sub_entry.name, vart))
elif sub_entry.name.endswith(".doc"):
doc_list.append(path + "/" + sub_entry.name)
return doc_list
def scan_dir(path: str, *vart) -> list:
""""Scans a directory for .doc files with the identifiers in vart. Calls find_docs to scan subdirectories."""
doc_list = []
for entry in os.scandir(path):
if entry.is_dir():
if len(vart) != 0:
for var in vart:
if var.lower() in entry.name.lower():
doc_list += (find_docs(path + "/" + entry.name, vart))
else:
doc_list += (find_docs(path + "/" + entry.name))
elif '.doc' in entry.name:
doc_list.append(path + "/" + entry.name)
return list(set(doc_list))
def main(file_name='lenis.tsv', path="//amr/ec/proj/fm/nsg/NAND/Shares/FailureAnalysis/rfrickey/20160808_Label_Spec_File_Copy"):
""" """
folder_list = os.scandir(path)
f = open(file_name, 'w+')
with tempfile.TemporaryDirectory() as tempdir:
with Parallel(n_jobs=-1) as parallel:
for orig_fold in folder_list:
prod_doc = scan_dir(path + "/" + orig_fold.name, 'label spec', orig_fold.name)
prod_doc = parallel(delayed(sort_list)(entry, "isn label spec", "2d label spec") for entry in prod_doc)
prod_doc = [x for x in prod_doc if x is not None]
final_set = find_duplicates(parallel, tempdir, prod_doc)
for i in final_set:
create_tsv(i, f)
f.close()
if __name__ == '__main__':
main()
| NaveenGop/projects | Intel/WordParse.py | Python | apache-2.0 | 9,074 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Class for reading GRD files into memory, without processing them.
'''
import os.path
import types
import xml.sax
import xml.sax.handler
from grit import exception
from grit.node import base
from grit.node import mapping
from grit.node import misc
from grit import util
class StopParsingException(Exception):
'''An exception used to stop parsing.'''
pass
class GrdContentHandler(xml.sax.handler.ContentHandler):
def __init__(self,
stop_after=None, debug=False, defines=None, tags_to_ignore=None):
# Invariant of data:
# 'root' is the root of the parse tree being created, or None if we haven't
# parsed out any elements.
    # 'stack' is a stack of elements that we push new nodes onto and
# pop from when they finish parsing, or [] if we are not currently parsing.
# 'stack[-1]' is the top of the stack.
self.root = None
self.stack = []
self.stop_after = stop_after
self.debug = debug
self.defines = defines
self.tags_to_ignore = tags_to_ignore or set()
self.ignore_depth = 0
def startElement(self, name, attrs):
assert not self.root or len(self.stack) > 0
if name in self.tags_to_ignore:
self.ignore_depth += 1
if self.debug:
print "Ignoring element %s and its children" % name
if self.ignore_depth != 0:
return
if self.debug:
attr_list = []
for attr in attrs.getNames():
attr_list.append('%s="%s"' % (attr, attrs.getValue(attr)))
if len(attr_list) == 0: attr_list = ['(none)']
attr_list = ' '.join(attr_list)
print ("Starting parsing of element %s with attributes %r" %
(name, attr_list))
typeattr = None
if 'type' in attrs.getNames():
typeattr = attrs.getValue('type')
node = mapping.ElementToClass(name, typeattr)()
if not self.root:
self.root = node
if self.defines:
self.root.SetDefines(self.defines)
if len(self.stack) > 0:
self.stack[-1].AddChild(node)
node.StartParsing(name, self.stack[-1])
else:
node.StartParsing(name, None)
# Push
self.stack.append(node)
for attr in attrs.getNames():
node.HandleAttribute(attr, attrs.getValue(attr))
def endElement(self, name):
if self.ignore_depth == 0:
if self.debug:
print "End parsing of element %s" % name
# Pop
self.stack[-1].EndParsing()
assert len(self.stack) > 0
self.stack = self.stack[:-1]
if self.stop_after and name == self.stop_after:
raise StopParsingException()
if name in self.tags_to_ignore:
self.ignore_depth -= 1
def characters(self, content):
if self.ignore_depth == 0:
if self.stack[-1]:
self.stack[-1].AppendContent(content)
def ignorableWhitespace(self, whitespace):
# TODO(joi) This is not supported by expat. Should use a different XML parser?
pass
def Parse(filename_or_stream, dir=None, flexible_root=False,
stop_after=None, first_ids_file=None, debug=False,
defines=None, tags_to_ignore=None):
'''Parses a GRD file into a tree of nodes (from grit.node).
If flexible_root is False, the root node must be a <grit> element. Otherwise
it can be any element. The "own" directory of the file will only be fixed up
if the root node is a <grit> element.
'dir' should point to the directory of the input file, or be the full path
to the input file (the filename will be stripped).
If 'stop_after' is provided, the parsing will stop once the first node
with this name has been fully parsed (including all its contents).
If 'debug' is true, lots of information about the parsing events will be
printed out during parsing of the file.
If 'first_ids_file' is non-empty, it is used to override the setting
for the first_ids_file attribute of the <grit> root node.
Args:
filename_or_stream: './bla.xml' (must be filename if dir is None)
dir: '.' or None (only if filename_or_stream is a filename)
flexible_root: True | False
stop_after: 'inputs'
first_ids_file: 'GRIT_DIR/../gritsettings/resource_ids'
debug: False
defines: dictionary of defines, like {'chromeos': '1'}
Return:
Subclass of grit.node.base.Node
Throws:
grit.exception.Parsing
'''
handler = GrdContentHandler(stop_after=stop_after, debug=debug,
defines=defines, tags_to_ignore=tags_to_ignore)
try:
xml.sax.parse(filename_or_stream, handler)
except StopParsingException:
assert stop_after
pass
except:
if not debug:
print "parse exception: run GRIT with the -x flag to debug .grd problems"
raise
if not flexible_root or hasattr(handler.root, 'SetOwnDir'):
assert isinstance(filename_or_stream, types.StringType) or dir != None
if not dir:
dir = util.dirname(filename_or_stream)
if len(dir) == 0:
dir = '.'
# Fix up the base_dir so it is relative to the input file.
handler.root.SetOwnDir(dir)
if isinstance(handler.root, misc.GritNode):
if first_ids_file:
handler.root.attrs['first_ids_file'] = first_ids_file
# Assign first ids to the nodes that don't have them.
handler.root.AssignFirstIds(filename_or_stream, defines)
return handler.root
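# Illustrative call (the file name and define are hypothetical):
#   root = Parse('resources.grd', dir='.', defines={'chromeos': '1'})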
if __name__ == '__main__':
util.ChangeStdoutEncoding()
print unicode(Parse(sys.argv[1]))
| JoKaWare/WTL-DUI | tools/grit/grit/grd_reader.py | Python | bsd-3-clause | 5,542 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.gather."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedGatherOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters([
# Basic gather (axis=0 and batch_dims=0)
dict(testcase_name='Params1DTensor_Indices1DTensor',
params=['a', 'b', 'c', 'd', 'e'],
indices=[2, 0, 2, 1],
expected=['c', 'a', 'c', 'b']),
dict(testcase_name='Params1DTensor_Indices2DRagged',
params=['a', 'b', 'c', 'd', 'e'],
indices=[[3, 1, 2], [1], [], [0]],
expected=[['d', 'b', 'c'], ['b'], [], ['a']]),
dict(testcase_name='Params2DRagged_Indices0DTensor',
params=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
indices=1,
expected=['c', 'd', 'e']),
dict(testcase_name='Params2DRagged_Indices1DTensor',
params=[['a', 'b', 'c'], ['d'], [], ['e']],
indices=[3, 1, 2, 1, 0],
expected=[
['e'], ['d'], [], ['d'], ['a', 'b', 'c']]),
dict(testcase_name='Params2DRagged_Indices2DRagged',
params=[['a', 'b', 'c'], ['d'], [], ['e']],
indices=[[3, 1, 2], [1], [], [0]],
expected=[
[['e'], ['d'], []], [['d']], [], [['a', 'b', 'c']]]),
dict(testcase_name='Params3DRagged_Indices2DTensor',
params=[
[['a', 'b'], []], [['c', 'd'], ['e'], ['f']], [['g']]],
indices=[[1, 2], [0, 1], [2, 2]],
indices_ragged_rank=0,
expected=[
[[['c', 'd'], ['e'], ['f']], [['g']]],
[[['a', 'b'], []], [['c', 'd'], ['e'], ['f']]],
[[['g']], [['g']]]]),
dict(testcase_name='Params3DRagged_Indices3DTensor',
params=[[['a', 'b'], []],
[['c', 'd'], ['e'], ['f']],
[['g']]],
indices=[[[1, 2], [0, 1], [2, 2]], [[0, 0], [1, 2], [0, 1]]],
indices_ragged_rank=0,
expected=[
[[[['c', 'd'], ['e'], ['f']], [['g']]],
[[['a', 'b'], []], [['c', 'd'], ['e'], ['f']]],
[[['g']], [['g']]]],
[[[['a', 'b'], []], [['a', 'b'], []]],
[[['c', 'd'], ['e'], ['f']], [['g']]],
[[['a', 'b'], []], [['c', 'd'], ['e'], ['f']]]]]),
dict(testcase_name='Params1DTensor_Indices4DRaggedRank2',
params=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
indices=[[[[3, 4], [0, 6]], []],
[[[2, 1], [1, 0]], [[2, 5]], [[2, 3]]],
[[[1, 0]]]],
indices_ragged_rank=2,
expected=[
[[['d', 'e'], ['a', 'g']], []],
[[['c', 'b'], ['b', 'a']], [['c', 'f']], [['c', 'd']]],
[[['b', 'a']]]]),
# Batch gather (batch_dims=1)
dict(testcase_name='Batch1D_Params2DRagged_Indices1DTensor',
params=[['a', 'b'], ['c'], ['d', 'e', 'f', 'g'], ['h']],
indices=[1, 0, 3, 0],
batch_dims=1,
expected=['b', 'c', 'g', 'h']),
dict(testcase_name='Batch1D_Params2DRagged_Indices2DTensor',
params=[['a', 'b'], ['c'], ['d', 'e', 'f', 'g'], ['h']],
indices=[[1, 0], [0, 0], [3, 1], [0, 0]],
indices_ragged_rank=0,
batch_dims=1,
expected=[['b', 'a'], ['c', 'c'], ['g', 'e'], ['h', 'h']]),
dict(testcase_name='Batch1D_Params2DRagged_Indices2DRagged',
params=[['a', 'b'], ['c'], ['d', 'e', 'f', 'g'], ['h']],
indices=[[1, 0], [], [3, 2, 1], [0]],
batch_dims=1,
expected=[['b', 'a'], [], ['g', 'f', 'e'], ['h']]),
dict(testcase_name='Batch1D_Params3DRagged_Indices3DRagged',
params=[[['a'], ['b', 'c']],
[],
[['d', 'e', 'f'], ['g'], ['h', 'i'], ['j']],
[['k']]],
indices=[[[1, 0], []], [], [[3, 2, 1], [0]], [[0]]],
batch_dims=1,
expected=[[[['b', 'c'], ['a']], []],
[],
[[['j'], ['h', 'i'], ['g']], [['d', 'e', 'f']]],
[[['k']]]]),
# Batch gather (batch_dims=2)
dict(testcase_name='Batch2D_Params3DRagged_Indices2DRagged',
params=[[['a', 'b', 'c'], ['d', 'e'], ['f']],
[['g'], ['h', 'i']]],
indices=[[0, 1, 0], [0, 1]],
batch_dims=2,
expected=[['a', 'e', 'f'], ['g', 'i']]),
dict(testcase_name='Batch2D_Params3DRagged_Indices3DRagged',
params=[[['a', 'b', 'c'], ['d', 'e'], ['f']],
[['g'], ['h', 'i']]],
indices=[[[2, 1, 0], [1, 1], [0]], [[0], []]],
batch_dims=2,
expected=[[['c', 'b', 'a'], ['e', 'e'], ['f']], [['g'], []]]),
# Batch gather (batch_dims=3)
dict(testcase_name='Batch3D_Params4DRagged_Indices3DRagged',
params=[[[['a', 'b', 'c'], ['d', 'e'], ['f']],
[['g'], ['h', 'i']]], [[['j']]]],
indices=[[[0, 1, 0], [0, 1]], [[0]]],
batch_dims=3,
expected=[[['a', 'e', 'f'], ['g', 'i']], [['j']]]),
# Axis gather (axis=1)
dict(testcase_name='Params2DRagged_Indices0DTensor_axis_1',
params=[['a', 'b'], ['c', 'd', 'e'], ['f', 'g'], ['h', 'i', 'j'],
['k', 'l']],
indices=1,
axis=1,
expected=['b', 'd', 'g', 'i', 'l']),
dict(testcase_name='Params2DRagged_Indices1DTensor_axis_1',
params=[['a', 'b'], ['c', 'd', 'e'], ['f', 'g'], ['h', 'i', 'j'],
['k', 'l']],
indices=[1, 0],
axis=1,
expected=[['b', 'a'], ['d', 'c'], ['g', 'f'], ['i', 'h'],
['l', 'k']]),
dict(testcase_name='Params3DRagged_Indices0DTensor_axis_1',
params=[[['a', 'b'], ['c', 'd', 'e']],
[['f', 'g'], ['h', 'i', 'j'], ['k', 'l']]],
indices=1,
axis=1,
expected=[['c', 'd', 'e'], ['h', 'i', 'j']]),
dict(testcase_name='Params3DRagged_Indices1DTensor_axis_1',
params=[[['a', 'b'], ['c', 'd', 'e']],
[['f', 'g'], ['h', 'i', 'j'], ['k', 'l']]],
indices=[1, 0],
axis=1,
expected=[[['c', 'd', 'e'], ['a', 'b']],
[['h', 'i', 'j'], ['f', 'g']]]),
# Batch/axis gather, batch = 1, axis > batch
dict(testcase_name='Params3DRagged_Indices1DTensor_batch_1_axis_2',
params=[[['a', 'b'], ['c', 'd', 'e']],
[['f', 'g'], ['h', 'i', 'j'], ['k', 'l']]],
indices=[1, 0],
axis=2,
batch_dims=1,
expected=[['b', 'd'], ['f', 'h', 'k']]),
dict(testcase_name='Params4DRagged_Indices1DTensor_batch_1_axis_2',
params=[[[['a', 'b'], ['c', 'd', 'e']]],
[[['f', 'g']], [['h', 'i', 'j'], ['k', 'l']]]],
indices=[0, 1],
axis=2,
batch_dims=1,
expected=[[['a', 'b']],
[['h', 'i', 'j'], ['k', 'l']]]),
]) # pyformat: disable
def testRaggedGather(self,
params,
indices,
expected,
axis=None,
batch_dims=0,
params_ragged_rank=None,
indices_ragged_rank=None):
params = ragged_factory_ops.constant(params, ragged_rank=params_ragged_rank)
indices = ragged_factory_ops.constant(
indices, ragged_rank=indices_ragged_rank)
actual = ragged_gather_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(actual, self._str_to_bytes(expected))
def _str_to_bytes(self, x):
if isinstance(x, list):
return [self._str_to_bytes(v) for v in x]
elif isinstance(x, str) and bytes is not str:
return bytes(x, 'utf-8')
else:
return x
def testOutOfBoundsError(self):
tensor_params = ['a', 'b', 'c']
tensor_indices = [0, 1, 2]
ragged_params = ragged_factory_ops.constant([['a', 'b'], ['c']])
ragged_indices = ragged_factory_ops.constant([[0, 3]])
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'indices\[1\] = 3 is not in \[0, 3\)'):
self.evaluate(ragged_gather_ops.gather(tensor_params, ragged_indices))
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'indices\[2\] = 2 is not in \[0, 2\)'):
self.evaluate(ragged_gather_ops.gather(ragged_params, tensor_indices))
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'indices\[1\] = 3 is not in \[0, 2\)'):
self.evaluate(ragged_gather_ops.gather(ragged_params, ragged_indices))
def testUnknownIndicesRankError(self):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant([], ragged_rank=1)
indices = constant_op.constant([0], dtype=dtypes.int64)
indices = array_ops.placeholder_with_default(indices, None)
self.assertRaisesRegex(ValueError,
r'rank\(indices\) must be known statically',
ragged_gather_ops.gather, params, indices)
# pylint: disable=bad-whitespace
@parameterized.parameters([
# params.shape=[2, None]; indices.shape=[3]
dict(
params = [[1.0, 2.0], [3.0, 4.0, 5.0]],
indices = [0, 0, 1],
expected_out = [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0, 5.0]],
out_grad = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6, 0.7]],
expected_grad = [[0.4, 0.6], [0.5, 0.6, 0.7]]),
# params.shape=[2, None]; indices.shape=[0]
dict(
params = [[1, 2], [3, 4, 5]],
indices = [],
expected_out = [],
out_grad = [],
expected_grad = [[0, 0], [0, 0, 0]]),
# params.shape=[2, None]; indices.shape=[2, 2]
dict(
params = [[1.0, 2.0], [3.0, 4.0, 5.0]],
indices = [[0, 0], [1, 0]],
expected_out = [[[1.0, 2.0], [1.0, 2.0]],
[[3.0, 4.0, 5.0], [1.0, 2.0]]],
out_grad = [[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6, 0.7], [0.8, 0.9]]],
expected_grad = [[1.2, 1.5], [0.5, 0.6, 0.7]]),
# params.shape=[3, None, None]; indices.shape=[3]
dict(
params = [[[1, 2], [3, 4, 5]], [[6.0]], [[7.0, 8.0]]],
indices = [2, 1, 2],
expected_out = [[[7.0, 8.0]], [[6.0]], [[7.0, 8.0]]],
out_grad = [[[0.1, 0.2]], [[0.3]], [[0.4, 0.5]]],
expected_grad = [[[0, 0], [0, 0, 0]], [[0.3]], [[0.5, 0.7]]]),
# params.shape=[3, None, None]; indices.shape=[0]
dict(
params = [[[1, 2], [3, 4, 5]], [[6.0]], [[7.0, 8.0]]],
indices = [2, 1, 2],
expected_out = [[[7.0, 8.0]], [[6.0]], [[7.0, 8.0]]],
out_grad = [[[0.1, 0.2]], [[0.3]], [[0.4, 0.5]]],
expected_grad = [[[0, 0], [0, 0, 0]], [[0.3]], [[0.5, 0.7]]]),
# params.shape=[0, None]; indices.shape=[0]
dict(
params = [],
indices = [],
expected_out = [],
out_grad = [],
expected_grad = [],
params_ragged_rank = 1),
# params.shape=[2, None, 2]; indices.shape=[3]
dict(
params = [[[1, 2], [3, 4]], [], [[5, 6]]],
indices = [1, 1, 2, 0, 2],
expected_out = [[], [], [[5, 6]], [[1, 2], [3, 4]], [[5, 6]]],
out_grad = [[], [], [[1, 2]], [[3, 4], [5, 6]], [[7, 7]]],
expected_grad = [[[3, 4], [5, 6]], [], [[8, 9]]],
params_ragged_rank = 1),
]) # pyformat: disable
@test_util.run_deprecated_v1
def testGradient(self,
params,
indices,
expected_out,
out_grad,
expected_grad,
params_ragged_rank=None):
"""Tests that ragged_gather generates the right gradient.
Args:
params: The `params` that should be passed to `gather`.
indices: The `indices` that should be passed to `gather`.
expected_out: The expected value of `gather(params, indices)`.
`expected_out.shape = indices.shape + params.shape[1:]`.
out_grad: The value that should be fed in as the gradient for `out`
when testing the gradient of `ragged_gather`. Must have the same
shape as `expected_out`.
      expected_grad: The expected gradient that should be returned for
        `params`. Must have the same shape as `params`.
params_ragged_rank: The ragged_rank of `params`.
"""
if context.executing_eagerly():
return
params = ragged_factory_ops.constant(
params, dtype=dtypes.float32, ragged_rank=params_ragged_rank)
indices = constant_op.constant(indices, dtype=dtypes.int32)
out_ragged_rank = params.ragged_rank + indices.shape.ndims - 1
out_grad = ragged_factory_ops.constant(
out_grad, dtype=dtypes.float32, ragged_rank=out_ragged_rank)
expected_out = ragged_factory_ops.constant(
expected_out, dtype=dtypes.float32, ragged_rank=out_ragged_rank)
expected_grad = ragged_factory_ops.constant(
expected_grad,
dtype=dtypes.float32,
ragged_rank=params.ragged_rank)
out = ragged_gather_ops.gather(params, indices)
self.assertAllClose(out, expected_out)
grads = gradients_impl.gradients(
out.flat_values,
(params.nested_row_splits + (params.flat_values, indices,)),
out_grad.flat_values)
param_nested_splits_grads = grads[:-2]
params_flat_values_grad = grads[-2]
indices_grad = grads[-1]
self.assertEqual(indices_grad, None)
for splits_grad in param_nested_splits_grads:
self.assertEqual(splits_grad, None)
# The gradient generates an IndexedSlices; convert back to a normal Tensor.
self.assertIsInstance(params_flat_values_grad, indexed_slices.IndexedSlices)
params_flat_values_grad = ops.convert_to_tensor(params_flat_values_grad)
params_grad = params.with_flat_values(params_flat_values_grad)
self.assertAllClose(params_grad, expected_grad, atol=2e-6, rtol=2e-6)
@parameterized.parameters([
# Basic gather (batch_dims == 0, axis == 0)
dict(params_shape=[3, 4], indices_shape=[], axis=0),
dict(params_shape=[3, 4], indices_shape=[5], axis=0),
dict(params_shape=[3, 4], indices_shape=[2, 5], axis=0),
# Gather over axis (axis > 0)
dict(params_shape=[3, 4], indices_shape=[], axis=1),
dict(params_shape=[3, 4], indices_shape=[2], axis=1),
dict(params_shape=[3, 4], indices_shape=[2, 5], axis=1),
dict(params_shape=[7, 3, 1], indices_shape=[2, 4], axis=1),
dict(params_shape=[3, 4, 5, 6], indices_shape=[2, 1, 7], axis=1),
dict(params_shape=[7, 3, 5], indices_shape=[], axis=2),
dict(params_shape=[7, 3, 5], indices_shape=[2], axis=2),
dict(params_shape=[7, 3, 5], indices_shape=[4, 2], axis=2),
dict(params_shape=[7, 3, 5, 6], indices_shape=[4, 2], axis=2),
dict(params_shape=[7, 3, 5, 6], indices_shape=[], axis=3),
dict(params_shape=[7, 3, 5, 6], indices_shape=[4], axis=3),
dict(params_shape=[7, 3, 5, 6], indices_shape=[8, 4], axis=3),
dict(params_shape=[7, 3, 5, 6], indices_shape=[2, 3, 2, 3], axis=3),
# Batched gather (batch_dims > 0)
dict(params_shape=[7, 3], indices_shape=[7], batch_dims=1),
dict(params_shape=[7, 3], indices_shape=[7, 5], batch_dims=1),
dict(params_shape=[5, 3], indices_shape=[5, 7, 4, 2], batch_dims=1),
dict(params_shape=[2, 3, 6], indices_shape=[2], batch_dims=1),
dict(params_shape=[7, 3, 6], indices_shape=[7, 5, 4, 2], batch_dims=1),
dict(params_shape=[7, 3, 5], indices_shape=[7, 3], batch_dims=2),
dict(params_shape=[7, 3, 5], indices_shape=[7, 3, 2], batch_dims=2),
dict(params_shape=[7, 3, 5, 6], indices_shape=[7, 3, 5], batch_dims=3),
dict(params_shape=[2, 3, 5, 6], indices_shape=[2, 3, 5, 7], batch_dims=3),
# Batched gather with axis (axis > batch_dims > 0)
dict(params_shape=[2, 3, 6], indices_shape=[2], axis=2, batch_dims=1),
dict(params_shape=[2, 3, 6], indices_shape=[2, 4], axis=2, batch_dims=1),
dict(
params_shape=[3, 1, 6, 7], indices_shape=[3, 4], axis=3,
batch_dims=1),
dict(
params_shape=[3, 2, 6, 7], indices_shape=[3, 4], axis=3,
batch_dims=1),
dict(
params_shape=[2, 3, 6, 7], indices_shape=[2, 3], axis=3,
batch_dims=2),
])
def testMatchesDenseGather(self,
params_shape,
indices_shape,
axis=None,
batch_dims=0):
    # Build random params & indices matrices w/ the expected shapes.
if axis is None:
axis = batch_dims
params = np.random.randint(100, size=params_shape, dtype=np.int32)
indices = np.random.randint(
params_shape[axis], size=indices_shape, dtype=np.int32)
# Use array_ops.gather to get the expected value.
expected = array_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
    # Build ragged tensors with varying ragged_ranks from params & indices.
params_tensors = [params] + [
ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=i)
for i in range(1, len(params_shape))
]
indices_tensors = [indices] + [
ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=i)
for i in range(1, len(indices_shape))
]
    # For each combination of params & indices tensors, check that
# ragged_gather_ops.gather matches array_ops.gather.
for params_tensor in params_tensors:
for indices_tensor in indices_tensors:
actual = ragged_gather_ops.gather(
params_tensor, indices_tensor, axis=axis, batch_dims=batch_dims)
if isinstance(actual, ragged_tensor.RaggedTensor):
actual = actual.to_tensor()
self.assertAllEqual(
expected, actual, 'params.ragged_rank=%s, indices.ragged_rank=%s' %
(getattr(params_tensor, 'ragged_rank',
0), getattr(indices_tensor, 'ragged_rank', 0)))
if __name__ == '__main__':
googletest.main()
| tensorflow/tensorflow | tensorflow/python/ops/ragged/ragged_gather_op_test.py | Python | apache-2.0 | 19,868 |
# lock.py
# DNF Locking Subsystem.
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.exceptions import ProcessLockError, ThreadLockError, LockError
from dnf.i18n import _
from dnf.yum import misc
import dnf.logging
import dnf.util
import hashlib
import logging
import os
import threading
import time
logger = logging.getLogger("dnf")
def _fit_lock_dir(dir_):
if not dnf.util.am_i_root():
# for regular users the best we currently do is not to clash with
# another DNF process of the same user. Since dir_ is quite definitely
# not writable for us, yet significant, use its hash:
hexdir = hashlib.md5(dir_.encode('utf-8')).hexdigest()
dir_ = os.path.join(misc.getCacheDir(), 'locks', hexdir)
return dir_
def build_download_lock(cachedir):
return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'download_lock.pid'),
'cachedir', True)
def build_metadata_lock(cachedir):
return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'metadata_lock.pid'),
'metadata', True)
def build_rpmdb_lock(persistdir):
return ProcessLock(os.path.join(_fit_lock_dir(persistdir), 'rpmdb_lock.pid'),
'RPMDB', True)
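# Usage sketch (the cache directory below is an assumed example): the builders
# return a ProcessLock that is used as a context manager; since they pass
# blocking=True, entering it waits for any other dnf process holding the lock.
#   with build_metadata_lock('/var/cache/dnf'):
#       pass  # refresh repository metadata here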
class ProcessLock(object):
def __init__(self, target, description, blocking=False):
self.blocking = blocking
self.count = 0
self.description = description
self.target = target
self.thread_lock = threading.RLock()
def _lock_thread(self):
if not self.thread_lock.acquire(blocking=False):
msg = '%s already locked by a different thread' % self.description
raise ThreadLockError(msg)
self.count += 1
def _try_lock(self):
pid = str(os.getpid()).encode('utf-8')
try:
fd = os.open(self.target, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o644)
os.write(fd, pid)
os.close(fd)
return True
except OSError:
return False
def _try_read_lock(self):
try:
with open(self.target, 'r') as f:
return int(f.readline())
except IOError:
return -1
except ValueError:
time.sleep(2)
try:
with open(self.target, 'r') as f:
return int(f.readline())
except IOError:
return -1
except ValueError:
                msg = _('Malformed lock file found: %s.\n'
                        'Ensure no other dnf process is running and '
                        'remove the lock file manually or run '
                        'systemd-tmpfiles --remove dnf.conf.') % self.target
raise LockError(msg)
def _try_unlink(self):
try:
os.unlink(self.target)
return True
except OSError:
return False
def _unlock_thread(self):
self.count -= 1
self.thread_lock.release()
def __enter__(self):
dnf.util.ensure_dir(os.path.dirname(self.target))
self._lock_thread()
inform = True
prev_pid = 0
while not self._try_lock():
pid = self._try_read_lock()
if pid == -1:
# already removed by other process
continue
if pid == os.getpid():
# already locked by this process
return
if not os.access('/proc/%d/stat' % pid, os.F_OK):
# locked by a dead process
self._try_unlink()
continue
if not self.blocking:
self._unlock_thread()
msg = '%s already locked by %d' % (self.description, pid)
raise ProcessLockError(msg, pid)
if inform or prev_pid != pid:
                msg = _('Waiting for process with pid %d to finish.') % pid
logger.info(msg)
inform = False
prev_pid = pid
time.sleep(2)
def __exit__(self, *exc_args):
if self.count == 1:
os.unlink(self.target)
self._unlock_thread()
| atodorov/dnf | dnf/lock.py | Python | gpl-2.0 | 5,154 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_scheduled_task
version_added: "2.0"
short_description: Manage scheduled tasks
description:
- Creates, modifies, or removes Windows scheduled tasks.
notes:
- In Ansible 2.4 and earlier, this could only be run on Server 2012/Windows 8
or newer. Since 2.5 this restriction has been lifted.
- The option names and structure for actions and triggers of a service follow
the C(RegisteredTask) naming standard and requirements, it would be useful to
read up on this guide if coming across any issues U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa382542.aspx).
options:
# module definition options
name:
description:
- The name of the scheduled task without the path.
required: yes
path:
description:
- Task folder in which this task will be stored.
- Will create the folder when C(state=present) and the folder does not
already exist.
- Will remove the folder when C(state=absent) and there are no tasks left
in the folder.
default: \
state:
description:
- When C(state=present) will ensure the task exists.
- When C(state=absent) will ensure the task does not exist.
choices: [ absent, present ]
default: present
# Action options
actions:
description:
- A list of action to configure for the task.
- See suboptions for details on how to construct each list entry.
- When creating a task there MUST be at least one action but when deleting
      a task this can be null or an empty list.
- The ordering of this list is important, the module will ensure the order
is kept when modifying the task.
- This module only supports the C(ExecAction) type but can still delete the
older legacy types.
type: list
suboptions:
path:
description:
- The path to the executable for the ExecAction.
required: yes
arguments:
description:
- An argument string to supply for the executable.
working_directory:
description:
- The working directory to run the executable from.
version_added: '2.5'
# Trigger options
triggers:
description:
- A list of triggers to configure for the task.
- See suboptions for details on how to construct each list entry.
- The ordering of this list is important, the module will ensure the order
is kept when modifying the task.
- There are multiple types of triggers, see U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa383868.aspx)
for a list of trigger types and their options.
- The suboption options listed below are not required for all trigger
types, read the description for more details.
type: list
suboptions:
type:
description:
- The trigger type, this value controls what below options are
required.
required: yes
choices: [ boot, daily, event, idle, logon, monthlydow, monthly, registration, time, weekly, session_state_change ]
enabled:
description:
        - Whether to set the trigger to enabled or disabled.
- Used in all trigger types.
type: bool
start_boundary:
description:
- The start time for the task, even if the trigger meets the other
start criteria, it won't start until this time is met.
- If you wish to run a task at 9am on a day you still need to specify
the date on which the trigger is activated, you can set any date even
ones in the past.
- Required when C(type) is C(daily), C(monthlydow), C(monthly),
          C(time), C(weekly), C(session_state_change).
- Optional for the rest of the trigger types.
- This is in ISO 8601 DateTime format C(YYYY-MM-DDThh:mm:ss).
end_boundary:
description:
- The end time for when the trigger is deactivated.
- This is in ISO 8601 DateTime format C(YYYY-MM-DDThh:mm:ss).
execution_time_limit:
description:
- The maximum amount of time that the task is allowed to run for.
- Optional for all the trigger types.
- Is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
delay:
description:
- The time to delay the task from running once the trigger has been
fired.
- Optional when C(type) is C(boot), C(event), C(logon),
C(registration), C(session_state_change).
- Is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
random_delay:
description:
- The delay time that is randomly added to the start time of the
trigger.
- Optional when C(type) is C(daily), C(monthlydow), C(monthly),
C(time), C(weekly).
- Is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
subscription:
description:
- Only used and is required for C(type=event).
- The XML query string that identifies the event that fires the
trigger.
user_id:
description:
- The username that the trigger will target.
- Optional when C(type) is C(logon), C(session_state_change).
- Can be the username or SID of a user.
- When C(type=logon) and you want the trigger to fire when a user in a
group logs on, leave this as null and set C(group) to the group you
wish to trigger.
days_of_week:
description:
- The days of the week for the trigger.
- Can be a list or comma separated string of full day names e.g. monday
instead of mon.
        - Required when C(type) is C(weekly) or C(session_state_change).
- Optional when C(type=monthlydow).
days_of_month:
description:
- The days of the month from 1 to 31 for the triggers.
- If you wish to set the trigger for the last day of any month
use C(run_on_last_day_of_month).
- Can be a list or comma separated string of day numbers.
- Required when C(type=monthly).
weeks_of_month:
description:
- The weeks of the month for the trigger.
- Can be a list or comma separated string of the numbers 1 to 4
representing the first to 4th week of the month.
- Optional when C(type=monthlydow).
months_of_year:
description:
- The months of the year for the trigger.
- Can be a list or comma separated string of full month names e.g.
march instead of mar.
- Optional when C(type) is C(monthlydow), C(monthly).
run_on_last_week_of_month:
description:
- Boolean value that sets whether the task runs on the last week of the
month.
- Optional when C(type) is C(monthlydow).
type: bool
run_on_last_day_of_month:
description:
- Boolean value that sets whether the task runs on the last day of the
month.
- Optional when C(type) is C(monthly).
type: bool
weeks_interval:
description:
- The interval of weeks to run on, e.g. C(1) means every week while
C(2) means every other week.
- Optional when C(type=weekly).
repetition:
description:
- Allows you to define the repetition action of the trigger that defines how often the task is run and how long the repetition pattern is repeated
after the task is started.
      - It takes the following keys: C(duration), C(interval), C(stop_at_duration_end).
suboptions:
duration:
description:
- Defines how long the pattern is repeated.
- The value is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
- By default this is not set which means it will repeat indefinitely.
type: str
interval:
description:
- The amount of time between each restart of the task.
- The value is written in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
type: str
stop_at_duration_end:
description:
- Whether a running instance of the task is stopped at the end of the repetition pattern.
type: bool
version_added: '2.5'
# Principal options
display_name:
description:
- The name of the user/group that is displayed in the Task Scheduler UI.
version_added: '2.5'
group:
description:
- The group that will run the task.
- C(group) and C(username) are exclusive to each other and cannot be set
at the same time.
- C(logon_type) can either be not set or equal C(group).
version_added: '2.5'
logon_type:
description:
- The logon method that the task will run with.
- C(password) means the password will be stored and the task has access
to network resources.
- C(s4u) means the existing token will be used to run the task and no
password will be stored with the task. Means no network or encrypted
files access.
- C(interactive_token) means the user must already be logged on
interactively and will run in an existing interactive session.
- C(group) means that the task will run as a group.
- C(service_account) means that a service account like System, Local
Service or Network Service will run the task.
choices: [ none, password, s4u, interactive_token, group, service_account, token_or_password ]
version_added: '2.5'
run_level:
description:
- The level of user rights used to run the task.
- If not specified the task will be created with limited rights.
choices: [ limited, highest ]
version_added: '2.4'
aliases: [ runlevel ]
username:
description:
- The user to run the scheduled task as.
- Will default to the current user under an interactive token if not
specified during creation.
aliases: [ user ]
password:
description:
- The password for the user account to run the scheduled task as.
- This is required when running a task without the user being logged in,
excluding the builtin service accounts.
- If set, will always result in a change unless C(update_password) is set
      to C(no) and no other changes are required for the service.
version_added: '2.4'
update_password:
description:
    - Whether to update the password even when no other changes have occurred.
- When C(yes) will always result in a change when executing the module.
type: bool
default: 'yes'
version_added: '2.5'
# RegistrationInfo options
author:
description:
- The author of the task.
version_added: '2.5'
date:
description:
- The date when the task was registered.
version_added: '2.5'
description:
description:
- The description of the task.
version_added: '2.5'
source:
description:
- The source of the task.
version_added: '2.5'
version:
description:
- The version number of the task.
version_added: '2.5'
# Settings options
allow_demand_start:
description:
- Whether the task can be started by using either the Run command or the
Context menu.
type: bool
version_added: '2.5'
allow_hard_terminate:
description:
- Whether the task can be terminated by using TerminateProcess.
type: bool
version_added: '2.5'
compatibility:
description:
    - An integer value that indicates which version of Task Scheduler a task
is compatible with.
- C(0) means the task is compatible with the AT command.
- C(1) means the task is compatible with Task Scheduler 1.0.
- C(2) means the task is compatible with Task Scheduler 2.0.
type: int
choices: [ 0, 1, 2 ]
version_added: '2.5'
delete_expired_task_after:
description:
- The amount of time that the Task Scheduler will wait before deleting the
task after it expires.
- A task expires after the end_boundary has been exceeded for all triggers
associated with the task.
- This is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
version_added: '2.5'
disallow_start_if_on_batteries:
description:
- Whether the task will not be started if the computer is running on
battery power.
type: bool
version_added: '2.5'
enabled:
description:
- Whether the task is enabled, the task can only run when C(yes).
type: bool
version_added: '2.5'
execution_time_limit:
description:
- The amount of time allowed to complete the task.
- When not set, the time limit is infinite.
- This is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
version_added: '2.5'
hidden:
description:
- Whether the task will be hidden in the UI.
type: bool
version_added: '2.5'
multiple_instances:
description:
- An integer that indicates the behaviour when starting a task that is
already running.
- C(0) will start a new instance in parallel with existing instances of
that task.
- C(1) will wait until other instances of that task to finish running
before starting itself.
- C(2) will not start a new instance if another is running.
- C(3) will stop other instances of the task and start the new one.
type: int
choices: [ 0, 1, 2, 3 ]
version_added: '2.5'
priority:
description:
- The priority level (0-10) of the task.
    - When creating a new task the default is C(7).
- See U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa383512.aspx)
for details on the priority levels.
type: int
version_added: '2.5'
restart_count:
description:
- The number of times that the Task Scheduler will attempt to restart the
task.
type: int
version_added: '2.5'
restart_interval:
description:
- How long the Task Scheduler will attempt to restart the task.
- If this is set then C(restart_count) must also be set.
- The maximum allowed time is 31 days.
- The minimum allowed time is 1 minute.
- This is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
version_added: '2.5'
run_only_if_idle:
description:
    - Whether the task will run only if the computer is in an idle
state.
type: bool
version_added: '2.5'
run_only_if_network_available:
description:
- Whether the task will run only when a network is available.
type: bool
version_added: '2.5'
start_when_available:
description:
- Whether the task can start at any time after its scheduled time has
passed.
type: bool
version_added: '2.5'
stop_if_going_on_batteries:
description:
- Whether the task will be stopped if the computer begins to run on battery
power.
type: bool
version_added: '2.5'
wake_to_run:
description:
- Whether the task will wake the computer when it is time to run the task.
type: bool
version_added: '2.5'
author:
- Peter Mounce (@petemounce)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: create a task to open 2 command prompts as SYSTEM
win_scheduled_task:
name: TaskName
description: open command prompt
actions:
- path: cmd.exe
arguments: /c hostname
- path: cmd.exe
arguments: /c whoami
triggers:
- type: daily
start_boundary: '2017-10-09T09:00:00'
username: SYSTEM
state: present
enabled: yes
- name: create task to run a PS script as NETWORK service on boot
win_scheduled_task:
name: TaskName2
description: Run a PowerShell script
actions:
- path: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe
arguments: -ExecutionPolicy Unrestricted -NonInteractive -File C:\TestDir\Test.ps1
triggers:
- type: boot
username: NETWORK SERVICE
run_level: highest
state: present
- name: change above task to run under a domain user account, storing the passwords
win_scheduled_task:
name: TaskName2
username: DOMAIN\User
password: Password
logon_type: password
- name: change the above task again, choosing not to store the password
win_scheduled_task:
name: TaskName2
username: DOMAIN\User
logon_type: s4u
- name: create task with multiple triggers
win_scheduled_task:
name: TriggerTask
path: \Custom
actions:
- path: cmd.exe
triggers:
- type: daily
- type: monthlydow
username: SYSTEM
- name: set logon type to password but don't force update the password
win_scheduled_task:
name: TriggerTask
path: \Custom
actions:
- path: cmd.exe
username: Administrator
password: password
update_password: no
- name: disable a task that already exists
win_scheduled_task:
name: TaskToDisable
enabled: no
- name: create a task that will be repeated every minute for five minutes
win_scheduled_task:
name: RepeatedTask
description: open command prompt
actions:
- path: cmd.exe
arguments: /c hostname
triggers:
- type: registration
repetition:
interval: PT1M
duration: PT5M
stop_at_duration_end: yes
'''
RETURN = r'''
'''
| alexlo03/ansible | lib/ansible/modules/windows/win_scheduled_task.py | Python | gpl-3.0 | 17,525 |
#!/usr/bin/python
#
# Cppcheck - A tool for static C/C++ code analysis
# Copyright (C) 2007-2015 Daniel Marjamaeki and Cppcheck team.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Extract test cases information from Cppcheck test
file
"""
import os
import sys
import re
class Extract:
"""
Read Cppcheck test file and create data
representation
"""
# array that stores all the test cases
nodes = []
def parseFile(self, filename):
"""
parse test file and add info to the nodes
variable
"""
name = '[0-9a-zA-Z_]+'
string = '\\"(.+)\\"'
testclass = None
functionName = None
fin = open(filename, 'r')
for line in fin:
# testclass starts
res = re.match('class (' + name + ')', line)
if res is not None:
testclass = res.group(1)
# end of testclass
if re.match('};', line) is not None:
testclass = None
# function start
res = re.match('\\s+void (' + name + ')\\(\\)', line)
if res is not None:
functionName = res.group(1)
elif re.match('\\s+}', line) is not None:
functionName = None
if functionName is None:
continue
# check
            res = re.match('\\s+check.*\\(' + string, line)
if res is not None:
code = res.group(1)
# code..
res = re.match('\\s+' + string, line)
if res is not None:
code = code + res.group(1)
# assert
res = re.match('\\s+ASSERT_EQUALS\\(\\"([^"]*)\\",', line)
if res is not None and len(code) > 10:
node = {'testclass': testclass,
'functionName': functionName,
'code': code,
'expected': res.group(1)}
self.nodes.append(node)
code = ''
# close test file
fin.close()
def strtoxml(s):
"""Convert string to xml/html format"""
return s.replace('&', '&').replace('"', '"').replace('<', '<').replace('>', '>')
def trimname(name):
"""Trim test name. Trailing underscore and digits are removed"""
while name[-1].isdigit():
name = name[:-1]
if name[-1] == '_':
name = name[:-1]
return name
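# For illustration (hypothetical test names): trimname('uninitvar_12') and
# trimname('uninitvar2') both yield 'uninitvar'.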
def writeHtmlFile(nodes, functionName, filename, errorsOnly):
"""Write html file for a function name"""
fout = open(filename, 'w')
fout.write('<html>\n')
fout.write('<head>\n')
fout.write(' <style type="text/css">\n')
fout.write(' body { font-size: 0.8em }\n')
fout.write(
' th { background-color: #A3C159; text-transform: uppercase }\n')
fout.write(' td { background-color: white; vertical-align: text-top }\n')
fout.write(' pre { background-color: #EEEEEE }\n')
fout.write(' </style>\n')
fout.write('</head>\n')
fout.write('<body>\n')
fout.write('<a href="index.htm">Home</a> -- ')
if errorsOnly:
fout.write('<a href="all-' + functionName + '.htm">All test cases</a>')
else:
fout.write(
'<a href="errors-' + functionName + '.htm">Error test cases</a>')
fout.write('<br><br>')
testclass = None
num = 0
for node in nodes:
if errorsOnly and node['expected'] == '':
continue
if trimname(node['functionName']) == functionName:
num = num + 1
if not testclass:
testclass = node['testclass']
fout.write(
'<h1>' + node['testclass'] + '::' + functionName + '</h1>')
fout.write('<table border="0" cellspacing="0">\n')
fout.write(
' <tr><th>Nr</th><th>Code</th><th>Expected</th></tr>\n')
fout.write(' <tr><td>' + str(num) + '</td>')
fout.write('<td><pre>' + strtoxml(
node['code']).replace('\\n', '\n') + '</pre></td>')
fout.write(
'<td>' + strtoxml(node['expected']).replace('\\n', '<br>') + '</td>')
fout.write('</tr>\n')
if testclass is not None:
fout.write('</table>\n')
fout.write('</body></html>\n')
fout.close()
if len(sys.argv) <= 1 or '--help' in sys.argv:
print ('Extract test cases from test file')
print (
'Syntax: extracttests.py [--html=folder] [--xml] [--code=folder] path/testfile.cpp')
sys.exit(0)
# parse command line
xml = False
filename = None
htmldir = None
codedir = None
for arg in sys.argv[1:]:
if arg == '--xml':
xml = True
elif arg.startswith('--html='):
htmldir = arg[7:]
elif arg.startswith('--code='):
codedir = arg[7:]
elif arg.endswith('.cpp'):
filename = arg
else:
print ('Invalid option: ' + arg)
sys.exit(1)
# extract test cases
if filename is not None:
# parse test file
e = Extract()
e.parseFile(filename)
# generate output
if xml:
print ('<?xml version="1.0"?>')
print ('<tree>')
count = 0
for node in e.nodes:
s = ' <node'
s += ' function="' + node['functionName'] + '"'
s += ' code="' + strtoxml(node['code']) + '"'
s += ' expected="' + strtoxml(node['expected']) + '"'
s += '/>'
print (s)
print ('</tree>')
elif htmldir is not None:
if not htmldir.endswith('/'):
htmldir += '/'
if not os.path.exists(htmldir):
os.mkdir(htmldir)
findex = open(htmldir + 'index.htm', 'w')
findex.write('<html>\n')
findex.write('<head>\n')
findex.write(' <style type="text/css">\n')
findex.write(' table { font-size: 0.8em }\n')
findex.write(
' th { background-color: #A3C159; text-transform: uppercase }\n')
findex.write(
' td { background-color: #F0FFE0; vertical-align: text-top }\n')
findex.write(' A:link { text-decoration: none }\n')
findex.write(' A:visited { text-decoration: none }\n')
findex.write(' A:active { text-decoration: none }\n')
findex.write(' A:hover { text-decoration: underline; color: blue }\n')
findex.write(' </style>\n')
findex.write('</head>\n')
findex.write('<body>\n')
findex.write('<h1>' + filename + '</h1>\n')
functionNames = []
for node in e.nodes:
functionname = trimname(node['functionName'])
if functionname not in functionNames:
functionNames.append(functionname)
functionNames.sort()
findex.write('<table border="0" cellspacing="0">\n')
findex.write(' <tr><th>Name</th><th>Errors</th><th>All</th></tr>\n')
for functionname in functionNames:
findex.write(' <tr><td>' + functionname + '</td>')
numall = 0
numerr = 0
for node in e.nodes:
if trimname(node['functionName']) == functionname:
numall = numall + 1
if node['expected'] != '':
numerr = numerr + 1
if numerr == 0:
findex.write('<td><div align="right">0</div></td>')
else:
findex.write('<td><a href="errors-' + functionname +
'.htm"><div align="right">' + str(numerr) + '</div></a></td>')
findex.write('<td><a href="all-' + functionname +
'.htm"><div align="right">' + str(numall) + '</div></a></td>')
findex.write('</tr>\n')
findex.write('</table>\n')
findex.write('</body></html>')
findex.close()
# create files for each functionName
for functionName in functionNames:
writeHtmlFile(e.nodes,
functionName,
htmldir + 'errors-' + functionName + '.htm',
True)
writeHtmlFile(e.nodes,
functionName,
htmldir + 'all-' + functionName + '.htm',
False)
elif codedir:
testnum = 0
if not codedir.endswith('/'):
codedir = codedir + '/'
if not os.path.exists(codedir):
os.mkdir(codedir)
errors = open(codedir + 'errors.txt', 'w')
for node in e.nodes:
testnum = testnum + 1
functionName = node['functionName']
code = node['code']
code = code.replace('\\n', '\n')
code = code.replace('\\"', '"')
expected = node['expected']
filename = '0000' + str(testnum) + '-'
filename = filename[-4:]
filename += functionName + '.cpp'
# source code
fout = open(codedir + filename, 'w')
fout.write(code)
fout.close()
# write 'expected' to errors.txt
if expected != '':
expected = expected.replace('\\n', '\n')
expected = expected.replace('\\"', '"')
expected = re.sub(
'\\[test.cp?p?:', '[' + filename + ':', expected)
errors.write(expected)
errors.close()
else:
for node in e.nodes:
print (node['functionName'])
| bolbol2night/cppcheck | tools/extracttests.py | Python | gpl-3.0 | 10,086 |
#!/usr/bin/python -u
#
#
#
#################################################################################
# Start off by implementing a general purpose event loop for anyone's use
#################################################################################
import sys
import getopt
import os
import libvirt
import select
import errno
import time
import threading
# For the sake of demonstration, this example program includes
# an implementation of a pure python event loop. Most applications
# would be better off just using the default libvirt event loop
# APIs, instead of implementing this in python. The exception is
# where an application wants to integrate with an existing 3rd
# party event loop impl
#
# Change this to 'False' to make the demo use the native
# libvirt event loop impl
use_pure_python_event_loop = True
do_debug = False
def debug(msg):
global do_debug
if do_debug:
print(msg)
#
# This general purpose event loop will support waiting for file handle
# I/O and errors events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
class virEventLoopPureHandle:
def __init__(self, handle, fd, events, cb, opaque):
self.handle = handle
self.fd = fd
self.events = events
self.cb = cb
self.opaque = opaque
def get_id(self):
return self.handle
def get_fd(self):
return self.fd
def get_events(self):
return self.events
def set_events(self, events):
self.events = events
def dispatch(self, events):
self.cb(self.handle,
self.fd,
events,
self.opaque)
# This class contains the data we need to track for a
# single periodic timer
class virEventLoopPureTimer:
def __init__(self, timer, interval, cb, opaque):
self.timer = timer
self.interval = interval
self.cb = cb
self.opaque = opaque
self.lastfired = 0
def get_id(self):
return self.timer
def get_interval(self):
return self.interval
def set_interval(self, interval):
self.interval = interval
def get_last_fired(self):
return self.lastfired
def set_last_fired(self, now):
self.lastfired = now
def dispatch(self):
self.cb(self.timer,
self.opaque)
def __init__(self):
self.poll = select.poll()
self.pipetrick = os.pipe()
self.pendingWakeup = False
self.runningPoll = False
self.nextHandleID = 1
self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
self.poll.register(self.pipetrick[0], select.POLLIN)
# Calculate when the next timeout is due to occur, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
    # calculates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
    # When poll() returns, there will be zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
    # Due to the coarse granularity of scheduler timeslices, if
    # we ask for a sleep of 500ms in order to satisfy a timer, we
    # may return up to 1 scheduler timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
def run_once(self):
sleep = -1
self.runningPoll = True
try:
next = self.next_timeout()
debug("Next timeout due at %d" % next)
if next > 0:
now = int(time.time() * 1000)
if now >= next:
sleep = 0
else:
sleep = (next - now) / 1000.0
debug("Poll with a sleep of %d" % sleep)
events = self.poll.poll(sleep)
# Dispatch any file handle events that occurred
for (fd, revents) in events:
                # See if the event was from the self-pipe
                # telling us to wake up. If so, discard
                # the data and just continue
if fd == self.pipetrick[0]:
self.pendingWakeup = False
data = os.read(fd, 1)
continue
h = self.get_handle_by_fd(fd)
if h:
debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
h.dispatch(self.events_from_poll(revents))
now = int(time.time() * 1000)
for t in self.timers:
interval = t.get_interval()
if interval < 0:
continue
want = t.get_last_fired() + interval
# Deduct 20ms, since scheduler timeslice
# means we could be ever so slightly early
if now >= (want-20):
debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
t.set_last_fired(now)
t.dispatch()
except (os.error, select.error), e:
if e.args[0] != errno.EINTR:
raise
finally:
self.runningPoll = False
    # Actually run the event loop forever
def run_loop(self):
self.quit = False
while not self.quit:
self.run_once()
def interrupt(self):
if self.runningPoll and not self.pendingWakeup:
self.pendingWakeup = True
os.write(self.pipetrick[1], 'c'.encode("UTF-8"))
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an event occurs.
    # Returns a unique integer identifier for this handle, that should be
# used to later update/remove it
def add_handle(self, fd, events, cb, opaque):
handleID = self.nextHandleID + 1
self.nextHandleID = self.nextHandleID + 1
h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
self.handles.append(h)
self.poll.register(fd, self.events_to_poll(events))
self.interrupt()
debug("Add handle %d fd %d events %d" % (handleID, fd, events))
return handleID
# Registers a new timer with periodic expiry at 'interval' ms,
# firing cb() each time the timer expires. If 'interval' is -1,
# then the timer is registered, but not enabled
    # Returns a unique integer identifier for this handle, that should be
# used to later update/remove it
def add_timer(self, interval, cb, opaque):
timerID = self.nextTimerID + 1
self.nextTimerID = self.nextTimerID + 1
h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
self.timers.append(h)
self.interrupt()
debug("Add timer %d interval %d" % (timerID, interval))
return timerID
# Change the set of events to be monitored on the file handle
def update_handle(self, handleID, events):
h = self.get_handle_by_id(handleID)
if h:
h.set_events(events)
self.poll.unregister(h.get_fd())
self.poll.register(h.get_fd(), self.events_to_poll(events))
self.interrupt()
debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events))
# Change the periodic frequency of the timer
def update_timer(self, timerID, interval):
for h in self.timers:
if h.get_id() == timerID:
h.set_interval(interval)
self.interrupt()
debug("Update timer %d interval %d" % (timerID, interval))
break
# Stop monitoring for events on the file handle
def remove_handle(self, handleID):
handles = []
for h in self.handles:
if h.get_id() == handleID:
self.poll.unregister(h.get_fd())
debug("Remove handle %d fd %d" % (handleID, h.get_fd()))
else:
handles.append(h)
self.handles = handles
self.interrupt()
# Stop firing the periodic timer
def remove_timer(self, timerID):
timers = []
for h in self.timers:
if h.get_id() != timerID:
timers.append(h)
debug("Remove timer %d" % timerID)
self.timers = timers
self.interrupt()
# Convert from libvirt event constants, to poll() events constants
def events_to_poll(self, events):
ret = 0
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
ret |= select.POLLIN
if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
ret |= select.POLLOUT
if events & libvirt.VIR_EVENT_HANDLE_ERROR:
ret |= select.POLLERR
if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
ret |= select.POLLHUP
return ret
# Convert from poll() event constants, to libvirt events constants
def events_from_poll(self, events):
ret = 0
if events & select.POLLIN:
ret |= libvirt.VIR_EVENT_HANDLE_READABLE
if events & select.POLLOUT:
ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE
if events & select.POLLNVAL:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR
if events & select.POLLERR:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR
if events & select.POLLHUP:
ret |= libvirt.VIR_EVENT_HANDLE_HANGUP
return ret
###########################################################################
# Now glue an instance of the general event loop into libvirt's event loop
###########################################################################
# This single global instance of the event loop will be used for
# monitoring libvirt events
eventLoop = virEventLoopPure()
# This keeps track of what thread is running the event loop,
# (if it is run in a background thread)
eventLoopThread = None
# This next set of 6 methods is the glue between the official
# libvirt events API, and our particular impl of the event loop
#
# There is no reason why the 'virEventLoopPure' has to be used.
# An application could easily make these 6 glue methods hook into
# another event loop such as GLib's, or something like the python
# Twisted event framework.
def virEventAddHandleImpl(fd, events, cb, opaque):
global eventLoop
return eventLoop.add_handle(fd, events, cb, opaque)
def virEventUpdateHandleImpl(handleID, events):
global eventLoop
return eventLoop.update_handle(handleID, events)
def virEventRemoveHandleImpl(handleID):
global eventLoop
return eventLoop.remove_handle(handleID)
def virEventAddTimerImpl(interval, cb, opaque):
global eventLoop
return eventLoop.add_timer(interval, cb, opaque)
def virEventUpdateTimerImpl(timerID, interval):
global eventLoop
return eventLoop.update_timer(timerID, interval)
def virEventRemoveTimerImpl(timerID):
global eventLoop
return eventLoop.remove_timer(timerID)
# This tells libvirt what event loop implementation it
# should use
def virEventLoopPureRegister():
libvirt.virEventRegisterImpl(virEventAddHandleImpl,
virEventUpdateHandleImpl,
virEventRemoveHandleImpl,
virEventAddTimerImpl,
virEventUpdateTimerImpl,
virEventRemoveTimerImpl)
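# Note: an event loop implementation must be registered (either this pure
# python one or the libvirt default) before the connection is opened,
# otherwise events will not be delivered.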
# Directly run the event loop in the current thread
def virEventLoopPureRun():
global eventLoop
eventLoop.run_loop()
def virEventLoopNativeRun():
while True:
libvirt.virEventRunDefaultImpl()
# Spawn a background thread to run the event loop
def virEventLoopPureStart():
global eventLoopThread
virEventLoopPureRegister()
eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
eventLoopThread.setDaemon(True)
eventLoopThread.start()
def virEventLoopNativeStart():
global eventLoopThread
libvirt.virEventRegisterDefaultImpl()
eventLoopThread = threading.Thread(target=virEventLoopNativeRun, name="libvirtEventLoop")
eventLoopThread.setDaemon(True)
eventLoopThread.start()
##########################################################################
# Everything that now follows is a simple demo of domain lifecycle events
##########################################################################
def eventToString(event):
eventStrings = ( "Defined",
"Undefined",
"Started",
"Suspended",
"Resumed",
"Stopped",
"Shutdown",
"PMSuspended",
"Crashed" )
return eventStrings[event]
def detailToString(event, detail):
eventStrings = (
( "Added", "Updated" ),
( "Removed", ),
( "Booted", "Migrated", "Restored", "Snapshot", "Wakeup" ),
( "Paused", "Migrated", "IOError", "Watchdog", "Restored", "Snapshot", "API error" ),
( "Unpaused", "Migrated", "Snapshot" ),
( "Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot"),
( "Finished", ),
( "Memory", "Disk" ),
( "Panicked", )
)
return eventStrings[event][detail]
def myDomainEventCallback1 (conn, dom, event, detail, opaque):
print("myDomainEventCallback1 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
eventToString(event),
detailToString(event, detail)))
def myDomainEventCallback2 (conn, dom, event, detail, opaque):
print("myDomainEventCallback2 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
eventToString(event),
detailToString(event, detail)))
def myDomainEventRebootCallback(conn, dom, opaque):
print("myDomainEventRebootCallback: Domain %s(%s)" % (dom.name(), dom.ID()))
def myDomainEventRTCChangeCallback(conn, dom, utcoffset, opaque):
print("myDomainEventRTCChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), utcoffset))
def myDomainEventWatchdogCallback(conn, dom, action, opaque):
print("myDomainEventWatchdogCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), action))
def myDomainEventIOErrorCallback(conn, dom, srcpath, devalias, action, opaque):
print("myDomainEventIOErrorCallback: Domain %s(%s) %s %s %d" % (dom.name(), dom.ID(), srcpath, devalias, action))
def myDomainEventGraphicsCallback(conn, dom, phase, localAddr, remoteAddr, authScheme, subject, opaque):
print("myDomainEventGraphicsCallback: Domain %s(%s) %d %s" % (dom.name(), dom.ID(), phase, authScheme))
def myDomainEventDiskChangeCallback(conn, dom, oldSrcPath, newSrcPath, devAlias, reason, opaque):
print("myDomainEventDiskChangeCallback: Domain %s(%s) disk change oldSrcPath: %s newSrcPath: %s devAlias: %s reason: %s" % (
dom.name(), dom.ID(), oldSrcPath, newSrcPath, devAlias, reason))
def myDomainEventTrayChangeCallback(conn, dom, devAlias, reason, opaque):
print("myDomainEventTrayChangeCallback: Domain %s(%s) tray change devAlias: %s reason: %s" % (
dom.name(), dom.ID(), devAlias, reason))
def myDomainEventPMWakeupCallback(conn, dom, reason, opaque):
print("myDomainEventPMWakeupCallback: Domain %s(%s) system pmwakeup" % (
dom.name(), dom.ID()))
def myDomainEventPMSuspendCallback(conn, dom, reason, opaque):
print("myDomainEventPMSuspendCallback: Domain %s(%s) system pmsuspend" % (
dom.name(), dom.ID()))
def myDomainEventBalloonChangeCallback(conn, dom, actual, opaque):
print("myDomainEventBalloonChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), actual))
def myDomainEventPMSuspendDiskCallback(conn, dom, reason, opaque):
print("myDomainEventPMSuspendDiskCallback: Domain %s(%s) system pmsuspend_disk" % (
dom.name(), dom.ID()))
def myDomainEventDeviceRemovedCallback(conn, dom, dev, opaque):
print("myDomainEventDeviceRemovedCallback: Domain %s(%s) device removed: %s" % (
dom.name(), dom.ID(), dev))
run = True
def myConnectionCloseCallback(conn, reason, opaque):
    global run
    reasonStrings = (
        "Error", "End-of-file", "Keepalive", "Client",
    )
    print("myConnectionCloseCallback: %s: %s" % (conn.getURI(), reasonStrings[reason]))
    run = False
def usage():
print("usage: "+os.path.basename(sys.argv[0])+" [-hdl] [uri]")
print(" uri will default to qemu:///system")
print(" --help, -h Print(this help message")
print(" --debug, -d Print(debug output")
print(" --loop, -l Toggle event-loop-implementation")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hdl", ["help", "debug", "loop"])
except getopt.GetoptError, err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-d", "--debug"):
global do_debug
do_debug = True
if o in ("-l", "--loop"):
global use_pure_python_event_loop
use_pure_python_event_loop ^= True
if len(args) >= 1:
uri = args[0]
else:
uri = "qemu:///system"
print("Using uri:" + uri)
# Run a background thread with the event loop
if use_pure_python_event_loop:
virEventLoopPureStart()
else:
virEventLoopNativeStart()
vc = libvirt.openReadOnly(uri)
# Close connection on exit (to test cleanup paths)
old_exitfunc = getattr(sys, 'exitfunc', None)
def exit():
print("Closing " + vc.getURI())
vc.close()
if (old_exitfunc): old_exitfunc()
sys.exitfunc = exit
vc.registerCloseCallback(myConnectionCloseCallback, None)
#Add 2 callbacks to prove this works with more than just one
vc.domainEventRegister(myDomainEventCallback1,None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, myDomainEventCallback2, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, myDomainEventRebootCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_RTC_CHANGE, myDomainEventRTCChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR, myDomainEventIOErrorCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_WATCHDOG, myDomainEventWatchdogCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_GRAPHICS, myDomainEventGraphicsCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_DISK_CHANGE, myDomainEventDiskChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_TRAY_CHANGE, myDomainEventTrayChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMWAKEUP, myDomainEventPMWakeupCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMSUSPEND, myDomainEventPMSuspendCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_BALLOON_CHANGE, myDomainEventBalloonChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMSUSPEND_DISK, myDomainEventPMSuspendDiskCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED, myDomainEventDeviceRemovedCallback, None)
vc.setKeepAlive(5, 3)
# The rest of your app would go here normally, but for sake
# of demo we'll just go to sleep. The other option is to
# run the event loop in your main thread if your app is
# totally event based.
while run:
time.sleep(1)
if __name__ == "__main__":
main()
| utkarshsins/baadal-libvirt-python | examples/event-test.py | Python | gpl-2.0 | 22,504 |
# converts incoming data array from temperature measurments
# The incoming data should be of the form [Temp, Time, Date]; this is put into a dictionary
# The dictionary can then be converted into a comma delimited file at the end of the program
import os
import time
import Temp
dire = "/home/pi/brewing/temps/"
def init(dire):
    # Ask the user for a file name and make sure it carries a .txt extension.
    name = str(raw_input("What would you like to name this file? "))
    if not name.endswith(".txt"):
        name += ".txt"
    fi = dire + name
    f = open(fi, "w+")
    f.write("Temp, Time, Date\n")
    f.close()
    return fi
def write(dire):
    fi = init(dire)
    # Append so the header written by init() is preserved.
    f = open(fi, "a")
    while True:
        # Temp.read_temp() is expected to return [Temp, Time, Date].
        a = ",".join(Temp.read_temp()) + "\n"
        f.write(a)
        f.flush()
        time.sleep(60)
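# Hypothetical entry point (an assumption, not part of the original script):
# start logging immediately when the module is run directly.
if __name__ == "__main__":
    write(dire)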
| Mr-President/brewing | dataman.py | Python | bsd-2-clause | 686 |
from Sire.IO import *
from Sire.Mol import *
from Sire.System import *
from Sire.MM import *
from Sire.Units import *
amber = Amber()
print("Loading the molecules...")
(molecules, space) = amber.readCrdTop("test/io/SYSTEM.crd", "test/io/SYSTEM.top")
print("...all loaded :-)")
protein = molecules[MolNum(2)].molecule()
#protein = molecules[MolNum(1)].molecule()
group_clj = IntraCLJFF("group_clj")
shift_clj = IntraCLJFF("shift_clj")
shift_clj.setShiftElectrostatics(True)
field_clj = IntraCLJFF("field_clj")
field_clj.setUseReactionField(True)
field_clj.setReactionFieldDielectric(78.3)
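# 78.3 is (approximately) the relative dielectric constant of water at room
# temperature, a common choice for reaction-field electrostatics.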
forcefields = [ group_clj, shift_clj, field_clj ]
group_coul = group_clj.components().coulomb()
shift_coul = shift_clj.components().coulomb()
field_coul = field_clj.components().coulomb()
system = System()
for forcefield in forcefields:
forcefield.add(protein)
system.add(forcefield)
def printEnergies(nrgs):
keys = list(nrgs.keys())
keys.sort()
for key in keys:
print("%25s : %12.8f" % (key, nrgs[key]))
system.setProperty("switchingFunction", HarmonicSwitchingFunction(10*angstrom, 9.5*angstrom))
printEnergies(system.energies())
print("\nEnergy with respect to cutoff length\n")
print(" Distance Group Shifted ReactionField ")
for i in range(10,501,5):
x = i*0.1
switchfunc = HarmonicSwitchingFunction(x*angstrom, (x-0.5)*angstrom)
system.setProperty("switchingFunction", switchfunc)
print("%12.8f %12.8f %12.8f %12.8f" % (x, system.energy(group_coul).value(),
system.energy(shift_coul).value(), system.energy(field_coul).value()))
| chryswoods/SireTests | unittests/SireMM/testintracutoff.py | Python | gpl-2.0 | 1,618 |
from ccs import constants
from ccs import default
##################################################################################
# HOSTNAME #
##################################################################################
HOSTNAME = "api.therocktrading.com"
##################################################################################
# REQUEST #
##################################################################################
REQUESTS = {}
REQUESTS["ticker"] = "/v1/funds/[symbol]/ticker"
REQUESTS["tickers"] = "/v1/funds/tickers"
REQUESTS["trades"] = "/v1/funds/[symbol]/trades"
REQUESTS["orderbook"] = "/v1/funds/[symbol]/orderbook"
REQUESTS["funds"] = "/v1/funds"
REQUESTS["fund"] = "/v1/funds/[symbol]"
##################################################################################
# HEADERS #
##################################################################################
HEADER = default.HEADER
COMPRESSION = constants.GZIP
TIMEOUT = default.TIMEOUT
##################################################################################
# MAPPING #
##################################################################################
MAPPING = {}
# TICKER #########################################################################
# DEFAULT
# TICKER = {LOW: 'low',
# HIGH: 'high',
# ASK: 'ask',
# BID: 'bid',
# LAST: 'last',
# VOLUME24H: 'volume24h',
# TIMESTAMP: 'timestamp'}
MAPPING[constants.TICKER] = {}
default.complete(default.TICKER, MAPPING[constants.TICKER])
# TRADES #########################################################################
# DEFAULT
# TRADE = {TID: "tid",
# PRICE: "price",
# AMOUNT: "amount",
# TIMESTAMP: "timestamp"}
MAPPING[constants.TRADE] = {}
default.complete(default.TRADE, MAPPING[constants.TRADE])
MAPPING[constants.TRADE_TYPE] = "" #{constants.BUY: "buy", constants.SELL: "sell"}
# ORDERBOOK #######################################################################
# DEFAULT
# ORDER = {PRICE: "price", AMOUNT: "amount"}
MAPPING[constants.ORDER] = {}
default.complete(default.ORDER, MAPPING[constants.ORDER])
##################################################################################
# SCHEMA #
##################################################################################
SCHEMA = {}
# TICKER #########################################################################
# {
# "type": "object",
# "properties": {
# "mid": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "bid": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "ask": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "last_price": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "low": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "high": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "volume": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "timestamp": {"type": "string", "pattern": constants.NUMBER_PATTERN}
# },
# "minProperties": 8,
# "additionalProperties": False
#
# }
SCHEMA[constants.TICKER] = {}
# TRADES #########################################################################
# {
# "type": "object",
# "properties": {
# "tid": {"type": "number"},
# "price": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "amount": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "type": {"type": "string", "enum": ["sell", "buy"]},
# "exchange": {"type": "string"},
# "timestamp": {"type": "number"}
# },
# "required": ["tid", "price", "amount", "type", "exchange","timestamp"],
# "additionalProperties": False
# }
SCHEMA[constants.TRADE] = {}
# {
# "type": "array",
# "items": SCHEMA[constants.TRADE]
# }
SCHEMA[constants.TRADES] = {}
# ORDERBOOK #######################################################################
# {
# "type": "object",
# "properties": {
# "price": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "amount": {"type": "string", "pattern": constants.NUMBER_PATTERN},
# "timestamp": {"type": "string", "pattern": constants.NUMBER_PATTERN}
# },
# "required": ["price", "amount", "timestamp"],
# "additionalProperties": False
# }
SCHEMA[constants.ORDER] = {}
# {
# "type": "array",
# "items": SCHEMA[constants.ORDER]
# }
SCHEMA[constants.ORDERS] = {}
# {
# "type": "object",
# "properties": {
# "asks": SCHEMA[constants.ORDERS],
# "bids": SCHEMA[constants.ORDERS]
# },
# "required": ["asks", "bids"],
# "additionalProperties": False
# }
SCHEMA[constants.ORDERBOOK] = {}
| Honzin/ccs | dev/therocktraiding/configuration.py | Python | agpl-3.0 | 5,212 |
#! /usr/bin/python
#-*-coding: utf8-*-
import os.path
import collections
import jinja2
import random
import qrcode
import os
import sys
import pprint
import json
import scanresults
import argparse
database = collections.defaultdict(lambda: [])
questions_by_id = {}
restrictions = {}
restrictions_order = {}
test_id = 1
debug = sys.argv.count('-d')
count = 0
VERSION = 1
def preprocess_line(line, remove_comments=True):
if debug > 1:
print(u'Reading line: "%s"' % line.decode('utf8'))
line = line.strip().decode('utf8')
if remove_comments and '%' in line:
idx = line.index('%')
if idx >= 0 and (idx == 0 or line[idx-1] != '\\'):
line = line[:idx].strip()
if debug > 1:
print(u'Returning line: "%s"' % line)
return line
def parser(master_path):
"""
    Reads the master file and parses each of the questions.
"""
master = open(master_path)
lines = master.readlines()
master.close()
# Parse the header
i = 0
while i < len(lines):
l = preprocess_line(lines[i][:-1])
i += 1
if l:
if l.startswith('-'):
break
tag, value = l.split(':')
tag = tag.strip()
value = int(value.strip())
restrictions[tag] = value
restrictions_order[tag] = len(restrictions_order)
if debug:
print('Adding restriction: %s: %i' % (tag, value))
if not 'total' in restrictions:
raise ValueError('Missing `total` directive')
if debug:
print('Restrictions:')
pprint.pprint(restrictions)
# Parse questions
while i < len(lines):
i = parse_question(i, lines)
def parse_question(i, lines):
"""Crea el objeto pregunta y la agrega en los contenidos
que se evalúan en la misma."""
global count
# Find the id line
i, identifier = parse_id(i, lines)
i, tags = parse_tag(i, lines)
i, header = parse_header(i, lines)
i, answers = parse_answers(i, lines)
count += 1
question = Question(header, answers, count, tags)
# Add answers to given tags
for t in tags:
database[t].append(question)
questions_by_id[count] = question
return i
def parse_header(i, lines):
header = u""
while i < len(lines):
l = preprocess_line(lines[i][:-1], remove_comments=False)
if l and l[0] == '_':
break
header += l + '\n'
i += 1
header = header.strip()
if not header:
raise ValueError('Header not found at line %i' % i)
if debug:
print(u'Found header: \n%s' % header)
return i, header
def strip(line):
lines = list(reversed(line.split('\n')))
i = 0
while i < len(lines):
l = lines[i].strip()
if l and not l.startswith('%'):
break
i += 1
if i >= len(lines):
return None
return u'\n'.join(reversed(lines[i:]))
def parse_answer(i, lines):
answer = u""
while i < len(lines):
l = preprocess_line(lines[i][:-1], remove_comments=False)
if l and ((answer and l[0] == '_') or l[0] == '('):
break
answer += l + '\n'
i += 1
if not answer:
return i, None
answer = strip(answer)
if answer.lower().startswith('_*'):
answer = (False, True, answer[2:].strip())
elif answer.lower().startswith('_x*'):
answer = (True, True, answer[3:].strip())
elif answer.lower().startswith('_x'):
answer = (True, False, answer[2:].strip())
elif answer.lower().startswith('_'):
answer = (False, False, answer[1:].strip())
else:
raise ValueError(u'Invalid answer prefix in "%s" at line %i' %
(answer, i))
if debug:
print(u'Found answer:\n%s' % str(answer))
return i, answer
def parse_answers(i, lines):
answers = []
while i < len(lines):
i, answer = parse_answer(i, lines)
if answer:
answers.append(answer)
else:
break
if not answers:
raise ValueError('No answer found at line %i' % i)
return i, answers
def parse_tag(i, lines):
i, l = skip_blank(i, lines)
if l is None:
raise ValueError('Tag line not found')
tags = l.split()
for t in tags:
if t[0] != '@':
raise ValueError('Invalid tag %s' % t)
if debug:
print(u'Found tag: %s' % t)
return i, tags
def skip_blank(i, lines):
while i < len(lines):
l = preprocess_line(lines[i][:-1])
if l:
return i+1, l
i += 1
return i, None
def parse_id(i, lines):
i, l = skip_blank(i, lines)
if l is None:
raise ValueError('Id line not found')
if l[0] != '(' or l[-1] != ')':
raise ValueError(u'Not valid Id line: %s at line %i' % (l, i))
id_text = l[1:-1]
if debug:
print(u'Found identifier: %s' % id_text)
return i, id_text
class Question:
"""Las preguntas tienen un campo `header` que es el enunciado,
y opciones. Algunas de estas opciones pueden considerarse
respuestas correctas."""
def __init__(self, header, options, number, tags, options_id=None, fixed=None):
if (not header or not options):
raise ValueError(u'Invalid question %s' % number)
self.options = options
self.header = header
self.number = number
self.tags = tags
self.fixed = fixed or {}
self.options_id = options_id
if self.options_id is None:
self.options_id = {}
for i, o in enumerate(self.options):
if o in self.options_id:
raise Exception('Invalid option exception. Duplicated answers are not allowed')
self.options_id[o] = i
if o[1]:
self.fixed[o] = i
@property
def correct_answers(self):
return len([None for r, f, o in self.options if r])
@property
def answers_count(self):
return len(self.options)
def enumerate_options(self):
return enumerate(self.options)
def shuffle(self):
"""
        Returns the options in shuffled order.
"""
options = list(self.options)
random.shuffle(options)
for o in list(options):
if o in self.fixed:
pos = self.fixed[o]
idx = options.index(o)
tmp = options[pos]
options[pos] = o
options[idx] = tmp
return Question(self.header, options, self.number, self.tags, self.options_id, self.fixed)
def convert(self):
order = [self.options_id[o] for o in self.options]
if debug:
print(order)
return scanresults.Question(self.number, len(self.options),
self.multiple, order=order)
@property
def multiple(self):
return len([o for o in self.options if o[0]]) > 1
def options_text(self, i, max):
alphabet = list("abcdefghijklmnopqrstuvwxyz")
first = True
for a, o in zip(alphabet, self.options):
if first:
yield i, a
first = False
else:
yield "", a
for i in range(len(self.options), max):
yield "", ""
def __str__(self):
return str(self.number)
def qrcode(self):
opts = "(%s)" % ",".join(str(self.options_id[o])
for o in self.options)
# return str(self.number) + '**'
if self.multiple:
return"%i*%s" % (self.number, opts)
else:
return "%i%s" % (self.number, opts)
def qrcode_data(test_id, i, test):
return "%i|%i|%i" % (test_id, i, VERSION)
def generate_qrcode(i, test):
filename = 'generated/v{0}/qrcode-{1}.png'.format(test_id, i)
f = open(filename, 'w')
qr = qrcode.QRCode(box_size=10, border=0)
data = qrcode_data(test_id, i, test)
qr.add_data(data)
if debug > 1:
print('QR Code data: %s' % data)
if debug > 2:
qr.print_tty()
qr.make()
img = qr.make_image()
img.save(f)
f.close()
def generate_quiz(args=None):
total = restrictions['total']
res = dict(restrictions)
res.pop('total')
base = {}
for k, v in database.items():
base[k] = list(v)
print('new quiz')
print('total', total)
print('base', base)
print('res', res)
print('')
test = set()
tries = 0
def get_question(tag):
print('tag', tag)
print('base', base)
if tag not in base:
raise ValueError('Could not fullfill a restriction '
'with tag "%s"' % tag)
i = random.choice(range(len(base[tag])))
q = base[tag].pop(i)
print('base[%s]' % tag, base[tag], bool(base[tag]))
if not base[tag]:
base.pop(tag)
if args and not args.dont_shuffle_options:
q = q.shuffle()
if debug > 1:
print(u'Selection question:\n%s' % str(q))
print('')
return q
while len(test) < total and tries < 10 * total:
print('test', test)
if res:
tag = random.choice(res.keys())
q = get_question(tag)
res[tag] -= 1
if not res[tag]:
res.pop(tag)
else:
tag = random.choice(base.keys())
q = get_question(tag)
test.add(q)
if len(test) < total:
raise ValueError('Could not complete test')
test = list(test)
random.shuffle(test)
    print('restrictions order', restrictions_order)
if args and args.dont_shuffle_tags:
test.sort(key=lambda q: restrictions_order.get(q.tags[0], float('inf')))
if args and args.sort_questions:
test.sort(key=lambda q: q.number)
return test
def generate(n, args):
# Guaranteeing reproducibility
seed = args.seed or random.randint(1, 2 ** 32)
random.seed(seed)
text_template = jinja2.Template(open(args.text_template).
read().decode('utf8'))
answer_template = jinja2.Template(open(args.answer_template).
read().decode('utf8'))
sol_template = jinja2.Template(open('templates/solution_template.txt').
read().decode('utf8'))
master_template = jinja2.Template(open(args.master_template).
read().decode('utf8'))
questions = set()
for qs in database.values():
for q in qs:
questions.add(q)
questions = sorted(questions, key=lambda q: q.number)
if not args.dont_generate_master:
master_file = open('generated/v{0}/Master.tex'.format(test_id), 'w')
master_file.write(master_template.render(test=questions,
header=args.title).encode('utf8'))
master_file.close()
sol_file = open('generated/v{0}/grader.txt'.format(test_id), 'w')
sol_file.write(sol_template.render(test=questions,
test_id=test_id, questions_value=args.questions_value).encode('utf8'))
sol_file.close()
order = {}
answers = []
for i in range(n):
if debug:
print('Generating quiz number %i' % i)
test = generate_quiz(args)
# order[i] = dict(exam_id=test_id, id=i, options=[])
order[i] = scanresults.Test(test_id, i, [q.convert() for q in test])
generate_qrcode(i, test)
if not args.dont_generate_text:
text_file = open('generated/v{0}/Test-{1:04}.tex'.format(test_id, i), 'w')
text_file.write(text_template.render(
test=test, number=i, header=args.title).encode('utf8'))
text_file.close()
answers.append(dict(test=list(enumerate(test)), number=i, seed=seed, max=max(len(q.options) for q in test)))
if len(answers) == args.answers_per_page or i == n - 1:
answer_file = open('generated/v{0}/Answer-{1:04}.tex'.format(test_id, i / args.answers_per_page), 'w')
answer_file.write(answer_template.render(answers=answers).encode('utf8'))
answer_file.close()
answers = []
scanresults.dump(order, 'generated/v{0}/order.json'.format(test_id))
with open('generated/v{0}/seed'.format(test_id), 'w') as fp:
fp.write(str(seed) + '\n')
if __name__ == '__main__':
args_parser = argparse.ArgumentParser(description="Parses a master file and generates tests.")
args_parser.add_argument('master', metavar="PATH", help="Path to the master file that contains the test description.")
args_parser.add_argument('-c', '--tests-count', metavar='N', help="Number of actual tests to generate. If not supplied, only the master file will be generated.", type=int, default=0)
args_parser.add_argument('-a', '--answers-per-page', help="Number of answer sections to generate per page. By default is 1. It is up to you to ensure all them fit right in your template.", metavar='N', type=int, default=1)
args_parser.add_argument('-t', '--title', help="Title of the test.", default="")
args_parser.add_argument('--answer-template', help="Template for the answers sheets.", default="latex/answer_template.tex")
args_parser.add_argument('--master-template', help="Template for the master sheet.", default="latex/master_template.tex")
args_parser.add_argument('--text-template', help="Template for the text sheets.", default="latex/text_template.tex")
args_parser.add_argument('-v', '--questions-value', help="Default value for each question.", metavar='N', type=float, default=1.)
args_parser.add_argument('--dont-shuffle-tags', help="Disallow shuffling of tags.", action='store_true')
args_parser.add_argument('--sort-questions', help="After selecting questions, put them in the same order as in the master.", action='store_true')
args_parser.add_argument('--dont-shuffle-options', help="Do not shuffle the options in the questions.", action='store_true')
args_parser.add_argument('--dont-generate-text', help="Do not generate text sheets, only answers.", action='store_true')
args_parser.add_argument('--election', help="Toggle all options for election mode.", action='store_true')
args_parser.add_argument('--questionnaire', help="Toggle all options for questionnaire mode.", action='store_true')
args_parser.add_argument('--dont-generate-master', help="Do not generate a master file.", action='store_true')
args = args_parser.parse_args()
if args.election:
args.answer_template = 'latex/election_template.tex'
args.sort_questions = True
args.dont_shuffle_options = True
args.dont_generate_text = True
args.dont_generate_master = True
if args.questionnaire:
args.answer_template = 'latex/questionnaire_template.tex'
args.sort_questions = True
args.dont_shuffle_options = True
args.dont_generate_text = True
args.dont_generate_master = True
if not os.path.exists('generated'):
os.mkdir('generated')
for d in os.listdir('generated'):
num = int(d[1:])
if num >= test_id:
test_id = num + 1
os.mkdir('generated/v{0}'.format(test_id))
    parser(args.master)
generate(args.tests_count, args)
print('Generated v{0}'.format(test_id))
| matcom/autoexam | gen.py | Python | mit | 15,531 |
# -*- coding: utf-8 -*-
"""
Pelican Mathjax Markdown Extension
==================================
An extension for the Python Markdown module that enables
the Pelican python blog to process mathjax. This extension
gives Pelican the ability to use Mathjax as a "first class
citizen" of the blog
"""
import markdown
from markdown.util import etree
from markdown.util import AtomicString
class PelicanMathJaxPattern(markdown.inlinepatterns.Pattern):
"""Inline markdown processing that matches mathjax"""
def __init__(self, pelican_mathjax_extension, tag, pattern):
super(PelicanMathJaxPattern,self).__init__(pattern)
self.math_tag_class = pelican_mathjax_extension.getConfig('math_tag_class')
self.pelican_mathjax_extension = pelican_mathjax_extension
self.tag = tag
def handleMatch(self, m):
node = markdown.util.etree.Element(self.tag)
node.set('class', self.math_tag_class)
prefix = '\\(' if m.group('prefix') == '$' else m.group('prefix')
suffix = '\\)' if m.group('suffix') == '$' else m.group('suffix')
node.text = markdown.util.AtomicString(prefix + m.group('math') + suffix)
# If mathjax was successfully matched, then JavaScript needs to be added
# for rendering. The boolean below indicates this
self.pelican_mathjax_extension.mathjax_needed = True
return node
class PelicanMathJaxCorrectDisplayMath(markdown.treeprocessors.Treeprocessor):
"""Corrects invalid html that results from a <div> being put inside
a <p> for displayed math"""
def __init__(self, pelican_mathjax_extension):
self.pelican_mathjax_extension = pelican_mathjax_extension
def correct_html(self, root, children, div_math, insert_idx, text):
"""Separates out <div class="math"> from the parent tag <p>. Anything
in between is put into its own parent tag of <p>"""
current_idx = 0
for idx in div_math:
el = markdown.util.etree.Element('p')
el.text = text
el.extend(children[current_idx:idx])
# Test to ensure that empty <p> is not inserted
if len(el) != 0 or (el.text and not el.text.isspace()):
root.insert(insert_idx, el)
insert_idx += 1
text = children[idx].tail
children[idx].tail = None
root.insert(insert_idx, children[idx])
insert_idx += 1
current_idx = idx+1
el = markdown.util.etree.Element('p')
el.text = text
el.extend(children[current_idx:])
if len(el) != 0 or (el.text and not el.text.isspace()):
root.insert(insert_idx, el)
def run(self, root):
"""Searches for <div class="math"> that are children in <p> tags and corrects
the invalid HTML that results"""
math_tag_class = self.pelican_mathjax_extension.getConfig('math_tag_class')
for parent in root:
div_math = []
children = list(parent)
for div in parent.findall('div'):
if div.get('class') == math_tag_class:
div_math.append(children.index(div))
# Do not process further if no displayed math has been found
if not div_math:
continue
insert_idx = list(root).index(parent)
self.correct_html(root, children, div_math, insert_idx, parent.text)
root.remove(parent) # Parent must be removed last for correct insertion index
return root
class PelicanMathJaxAddJavaScript(markdown.treeprocessors.Treeprocessor):
"""Tree Processor for adding Mathjax JavaScript to the blog"""
def __init__(self, pelican_mathjax_extension):
self.pelican_mathjax_extension = pelican_mathjax_extension
def run(self, root):
# If no mathjax was present, then exit
if (not self.pelican_mathjax_extension.mathjax_needed):
return root
# Add the mathjax script to the html document
mathjax_script = etree.Element('script')
mathjax_script.set('type','text/javascript')
mathjax_script.text = AtomicString(self.pelican_mathjax_extension.getConfig('mathjax_script'))
root.append(mathjax_script)
# Reset the boolean switch to false so that script is only added
# to other pages if needed
self.pelican_mathjax_extension.mathjax_needed = False
return root
class PelicanMathJaxExtension(markdown.Extension):
"""A markdown extension enabling mathjax processing in Markdown for Pelican"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
self.config['mathjax_script'] = ['', 'Mathjax JavaScript script']
self.config['math_tag_class'] = ['math', 'The class of the tag in which mathematics is wrapped']
super(PelicanMathJaxExtension,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
config['mathjax_script'] = [config['mathjax_script'], 'Mathjax JavaScript script']
            config['math_tag_class'] = [config['math_tag_class'], 'The class of the tag in which mathematics is wrapped']
super(PelicanMathJaxExtension,self).__init__(config)
# Used as a flag to determine if javascript
# needs to be injected into a document
self.mathjax_needed = False
def extendMarkdown(self, md, md_globals):
# Regex to detect mathjax
mathjax_inline_regex = r'(?P<prefix>\$)(?P<math>.+?)(?P<suffix>(?<!\s)\2)'
mathjax_display_regex = r'(?P<prefix>\$\$|\\begin\{(.+?)\})(?P<math>.+?)(?P<suffix>\2|\\end\{\3\})'
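        # Illustrative matches for the patterns above (examples, not from the
        # original source): inline '$x^2$' becomes <span class="math">\(x^2\)</span>,
        # while displayed '$$x^2$$' or '\begin{align}...\end{align}' is wrapped
        # in a <div class="math"> with its delimiters kept intact.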
# Process mathjax before escapes are processed since escape processing will
        # interfere with mathjax. The order in which the displayed and inlined math
# is registered below matters
md.inlinePatterns.add('mathjax_displayed', PelicanMathJaxPattern(self, 'div', mathjax_display_regex), '<escape')
md.inlinePatterns.add('mathjax_inlined', PelicanMathJaxPattern(self, 'span', mathjax_inline_regex), '<escape')
        # Correct the invalid HTML that results from the displayed math (<div> tag within a <p> tag)
md.treeprocessors.add('mathjax_correctdisplayedmath', PelicanMathJaxCorrectDisplayMath(self), '>inline')
# If necessary, add the JavaScript Mathjax library to the document. This must
# be last in the ordered dict (hence it is given the position '_end')
md.treeprocessors.add('mathjax_addjavascript', PelicanMathJaxAddJavaScript(self), '_end')
| jprine/pelican-plugins | render_math/pelican_mathjax_markdown_extension.py | Python | agpl-3.0 | 6,633 |
from config import *
from flask_wtf import Form
from wtforms import IntegerField, TextAreaField, TextField, BooleanField, SubmitField, SelectMultipleField, validators
class CustomForm(Form):
def __init__(self,*args):
super(CustomForm,self).__init__(*args)
self.__delitem__('csrf_token')
self.csrf_enabled = False
class CourseQueryForm(CustomForm):
dept = TextField('Department', id="dept")
keywords = TextField('Keywords', id="keywords")
gen_eds = SelectMultipleField('Gen Ed Fulfillments',
choices=[('',''),
('BL','BL'),
('HB','HB'),
('HBSSM','HBSSM'),
('HE','HE'),
('HEPT','HEPT'),
('Hist','Hist'),
('Intcl','Intcl'),
('NWL','NWL'),
('NWNL','NWNL'),
('Quant','Quant'),
('Rel','Rel'),
('Skl','Skl'),
('Wel','Wel')], id="gen_eds")
class AltDescForm(CustomForm):
content = TextAreaField('Content',
[validators.DataRequired()],
id="content")
| JohnDoorenbos/courseFinder | forms.py | Python | mit | 1,537 |
#Filters:
import os
import re
import tRNASeqTools.extractor as extractor
class IsTRNA:
def __init__(self, output_path):
self.ANTICODON_LOOP_GUIDELINES = [0, (('T'), ('A', 'G')), [], [], 0]
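        # Allowed reverse-complement pairings (Watson-Crick pairs plus the
        # G-T wobble pair); 'N' is treated as pairing with nothing.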
self.allowed_pairings = {"G":("C", "T"), "T":("A", "G"), "C":("G"), "A":("T"), "N": ()}
self.sub_size = 24
self.output_path = output_path
self.T_LOOP_AND_ACCEPTOR_GUIDELINES = [[], 0, 0]
self.SET_UP_FILTERS = {"Allow_one_mismatch_in_the_anticodon_pairs": self.change_anticodon_loop_guidelines(0, 1), #Canonical
"Positions_34_and_37": self.change_anticodon_loop_guidelines(1, (('T'), ('A', 'G'))), #Canonical
## "Anticodon_arm_starting_pos_at_11": self.change_anticodon_loop_guidelines(4, 11),
"Anticodon_arm_starting_pos_at_24": self.change_anticodon_loop_guidelines(4, 24), #Canonical
"Type_I_length_between_8_and_9": self.change_anticodon_loop_guidelines(2, [8, 9]), #Canonical
"Type_II_length_between_16_and_27": self.change_anticodon_loop_guidelines(3, range(16, 27)), #Canonical
## "T_region_length_between_4_and_20": self.change_anticodon_loop_guidelines(2, range(3, 25)),
"require_T_Loop_G_at_0": self.change_T_and_acc_guidelines(0, (0, "G")), #Canonical
"require_T_Loop_T_at_1": self.change_T_and_acc_guidelines(0, (1, "T")), #Canonical
"require_T_Loop_T_at_2": self.change_T_and_acc_guidelines(0, (2, "T")), #Canonical
"require_T_Loop_C_at_3": self.change_T_and_acc_guidelines(0, (3, "C")), #Canonical
"require_T_Loop_C_at_8": self.change_T_and_acc_guidelines(0, (8, "C")), #Canonical
"require_acceptor_C_at_-3": self.change_T_and_acc_guidelines(0, (-3, "C")), #Canonical
"require_acceptor_C_at_-2": self.change_T_and_acc_guidelines(0, (-2, "C")), #Canonical
"require_acceptor_A_at_-1": self.change_T_and_acc_guidelines(0, (-1, "A")), #Canonical
"Allow_one_mismatch_in_T-loop_and_acceptor": self.change_T_and_acc_guidelines(1, 1), #Canonical
## "Allow_no_mismatches_in_T-loop_and_acceptor": self.change_T_and_acc_guidelines(1, 0),
## "Require_Acceptor_Stem_Matching_with_one_mismatch": self.change_T_and_acc_guidelines(2, (True, 2))
}
self.FILTERS = {"Longer_than_30": lambda seq: len(seq) > 24, #Canonical: 24
"Shorter_than_200": lambda seq: len(seq) < 200, #Canonical: 200
## "Anticodon_is_known": self.isAnticodonKnown,
"T_loop_and_acceptor_is_acceptable": self.t_loop_and_acceptor, #Canonical
## "D_region_and_T_region_acceptable_length": self.check_D_and_T_region_lengths
}
self.D_region_range = list(range(5, 22))
for i in range(len(self.D_region_range)):
self.D_region_range[i] += 13
self.T_region_range = list(range(17, 25))
for j in range(len(self.T_region_range)):
self.T_region_range[j] = -(self.T_region_range[j] + 22)
for elem in self.SET_UP_FILTERS:
self.SET_UP_FILTERS[elem]
if self.T_LOOP_AND_ACCEPTOR_GUIDELINES[2] == 0:
self.T_LOOP_AND_ACCEPTOR_GUIDELINES[2] = (False, 0)
FILTER_DESCRIPTIONS = {"Allow_one_mismatch_in_the_anticodon_pairs": "This filter allows a single mismatch when paring the anticodon stem",
"Positions_34_and_37": "This filter requires that position 34 (right before the anticodon) is a T, and position 37 (right after the anticodon) is an A or G",
"Anticodon_arm_starting_pos_at_N": "This filter determines where the code starts looking for the anticodon arm. For non-canoncial, this is the length of the 3' NCCA and acceptor stem, up to the base of the T-region. For canonical searches, this is the 3' NCCA, acceptor stem, 3' T-stem and T-loop, since the V-loop length includes the 5' T-stem loop",
"Type_I_length_between_8_and_9": "For a sequence to be counted as a Type I sequence, the distance between the G at the end of the T-Stem loop and the anticodon must be 8 or 9 (this is the 4-5 of the V-loop + 4)",
"Type_II_length_between_16_and_27": "For a sequence to be counted as a Type II sequence, the distance between the G at the end of the T-Stem loop and the anticodon must be 16-27 (this is the 12-23 of the V-loop + 4)",
"T_region_length_between_4_and_20": "This is a non-canonical search filter, where the length of the T-region varied instead of the V-loop",
"require_T_Loop_G_at_0": "This filter requires that the 5' T-stem ends with a G",
"require_T_Loop_T_at_1": "This filter requires that the T-loop starts with a T",
"require_T_Loop_T_at_2": "This filter requires that the T-loop's second position is a T",
"require_T_Loop_C_at_3": "This filter requires that the T-loop's third position is a C",
"require_T_Loop_C_at_8": "This filter requires that the 3' T-stem stars with a C",
"require_acceptor_C_at_minus_3": "This filter requires that the acceptor ends with CNN",
"require_acceptor_C_at_minus_2": "This filter requires that the acceptor ends with NCN",
"require_acceptor_A_at_minus_1": "This filter requires that the acceptor ends with NNA",
"Allow_one_mismatch_in_T-loop_and_acceptor": "This filter allows a single mismatch within the requirements for the T-loop and the acceptor, as listed above",
"Longer_than_24": "This filter requires that the sequence be longer than 24 nucleotides",
"Shorter_than_200": "This filter requires that the sequence be shorter than 200 nucleotides",
"Anticodon_is_known": "This filter discards all sequences where the anticodon was not found with the current parameters",
"T-Loop_and_acceptor are acceptable": "This filter discards all sequences where the T-loop and Acceptor are not found using the current parameters",
"Require_Acceptor_Stem_Matching_with_one_mismatch": "This is a non-canonical search filter that requires both ends of the acceptor stem to be present and match, allowing one mismatch"
}
def getFilters(self):
return (self.FILTERS, self.SET_UP_FILTERS)
def istRNA(self, seq, name):
problems = []
self.D_region_shift = 0
self.anticodon = []
self.name = name
#Finding the length of a potential 5' trail past the acceptor stem (only relevant for non-canonical)
for n in range(len(seq) - 43):
misses = 0
for j in range(7):
if seq[j + n] not in self.allowed_pairings[seq[-5 - j]]:
misses += 1
if misses < self.T_LOOP_AND_ACCEPTOR_GUIDELINES[2][1]:
self.D_region_shift = n
#Running the filters
for filt in self.FILTERS:
if not self.FILTERS[filt](seq):
open(self.output_path + filt, "a").write(self.name + "\n" + seq + "\n")
return filt
return ""
def change_anticodon_loop_guidelines(self, i, changeItTo):
self.ANTICODON_LOOP_GUIDELINES[i] = changeItTo
def getAnticodonGuidelines(self):
return self.ANTICODON_LOOP_GUIDELINES
def isAnticodonKnown(self, seq):
if self.anticodon == []:
self.extractor = extractor.Extractor()
fullLength = len(seq) > 70 and len(seq) < 100 and seq[7] == "T" and seq[13] == "A"
self.anticodon = self.extractor.extract_anticodon(seq, fullLength)
return not self.anticodon == []
def check_D_and_T_region_lengths(self, seq):
if self.isAnticodonKnown(seq):
for anti in self.anticodon:
for pos34 in self.ANTICODON_LOOP_GUIDELINES[1][0]:
for elem in [m.start() for m in re.finditer(pos34 + anti, seq)]:
if elem - self.D_region_shift in self.D_region_range and elem - len(seq) in self.T_region_range:
return True
return False
def get_t_loop_and_acceptor_guidelines(self):
return self.T_LOOP_AND_ACCEPTOR_GUIDELINES
def change_T_and_acc_guidelines(self, i, changeItTo):
if type(self.T_LOOP_AND_ACCEPTOR_GUIDELINES[i]) == int:
self.T_LOOP_AND_ACCEPTOR_GUIDELINES[i] = changeItTo
else:
self.T_LOOP_AND_ACCEPTOR_GUIDELINES[i].append(changeItTo)
def t_loop_and_acceptor(self, seq):
length = len(seq)
shortestMissed = [0] * (len(self.T_LOOP_AND_ACCEPTOR_GUIDELINES[0]) + 1)
for i in range(length - self.sub_size + 1):
sub_str = seq[-(i + self.sub_size):(length - i)]
missed = []
for position_tuple in self.T_LOOP_AND_ACCEPTOR_GUIDELINES[0]:
if sub_str[position_tuple[0]] != position_tuple[1]:
missed.append(position_tuple)
if len(missed) < self.T_LOOP_AND_ACCEPTOR_GUIDELINES[1] + 1:
if self.T_LOOP_AND_ACCEPTOR_GUIDELINES[2][0]:
misses = 0
for j in range(7):
if seq[j + self.D_region_shift] not in self.allowed_pairings[seq[-5 - j]]:
misses += 1
if misses < self.T_LOOP_AND_ACCEPTOR_GUIDELINES[2][1]:
return True
open(self.output_path + "Require_Acceptor_Stem_Matching_with_one_mismatch", "a").write(self.name + "\n" + seq + "\n")
else:
return True
if len(missed) < len(shortestMissed):
shortestMissed = missed
for elem in shortestMissed:
fileName = "require_"
if elem[0] > 0:
fileName += "T_Loop_"
else:
fileName += "acceptor_"
fileName += elem[1] + "_at_" + str(elem[0])
open(self.output_path + fileName, "a").write(self.name + "\n" + seq + "\n")
open(self.output_path + "Allow_one_mismatch_in_T-loop_and_acceptor", "a").write(self.name + "\n" + seq + "\n")
return False
| merenlab/tRNA-seq-tools | tRNASeqTools/filters.py | Python | gpl-3.0 | 10,988 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
BANKS = {
'RBC Banque Royale': {
'protocol': 'https',
'domain': 'www1.royalbank.com',
'path': '/cgi-bin/rbaccess/rbunxcgi?F6=1&F7=IB&F21=IB&F22=IB&REQUEST=ClientSignin&LANGUAGE=FRENCH',
},
'TD Canada Trust': {
'protocol': 'https',
'domain': 'banquenetcpo.td.com',
'path': '/waw/idp/login.htm?execution=e1s1',
},
'BDC': {
'protocol': 'https',
'domain':'connex.bdc.ca',
'path': '/_LOGIN/BDCConnexlogin.aspx',
},
'CIBC': {
'protocol': 'https',
'domain': 'www.cibc.com',
'path': '/ca/personal.html?int_id=HP_PersonalBanking',
},
'Banque de Montreal': {
'protocol': 'https',
'domain': 'www1.bmo.com',
'path': '/onlinebanking/cgi-bin/netbnx/NBmain?product=6',
},
'Scotiabank': {
'protocol': 'https',
'domain': 'www2.scotiaonline.scotiabank.com',
'path': '/online/authentication/authentication.bns',
},
'BNC': {
'protocol': 'https',
'domain': 'bvi.bnc.ca',
'path': '/',
},
'Banque Laurentienne': {
'protocol': 'https',
'domain': 'blcweb.banquelaurentienne.ca',
'path': '/BLCDirect/',
},
'Desjardins': {
'protocol': 'https',
'domain': 'accesd.desjardins.com',
'path': '/fr/accesd/',
},
}
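# For example, the 'CIBC' entry above expands in main() below to the check URL
# https://www.cibc.com/ca/personal.html?int_id=HP_PersonalBanking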
template = (
"""define host {
use generic-host
host_name %(domain)s
address %(domain)s
alias %(domain)s
check_command check_dummy!0!OK
}
define service {
use generic-service
host_name %(domain)s
check_command check_http2!%(url)s
display_name %(bank)s
service_description %(domain)s
servicegroups banks
labels order_%(order)d
action_url %(url)s
}
""")
business_rule = (
"""
define host {
use generic-host
host_name banks
alias banks
check_command check_dummy!0!OK
}
define service {
use template_bprule
host_name banks
service_description banks
servicegroups main
display_name Banques
notes Services en ligne des principales banques.
check_command bp_rule!%(all_banks)s
business_rule_output_template $(x)$
icon_image fa-btc
}
""")
def main():
# all_banks is a workaround while we wait for g:group_banks to work
all_banks = []
for order, (bank, values) in enumerate(BANKS.iteritems()):
all_banks.append('%s,%s' % (values['domain'], values['domain']))
url = values['protocol'] + '://' + values['domain'] + values['path']
print template % {'bank': bank.replace('_', ' '),
'domain': values['domain'],
'order': order + 1,
'url': url}
all_banks = '&'.join(all_banks)
print business_rule % {'all_banks': all_banks}
if __name__ == '__main__':
main()
| savoirfairelinux/quebec-monitoring | scripts/banks.py | Python | agpl-3.0 | 3,447 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from sockjs.tornado import SockJSRouter, SockJSConnection
from snorky.client import Client
import json
class SockJSClient(Client):
"""A SockJS client."""
def __init__(self, req_handler):
super(SockJSClient, self).__init__()
self.req_handler = req_handler
@property
def remote_address(self):
"""IP address of the client"""
return self.req_handler.request.remote_ip
def send(self, msg):
"""Sends a message through the SockJS channel."""
self.req_handler.send(json.dumps(msg))
class SnorkySockJSHandler(SockJSConnection):
"""Handles SockJS connections.
A ``service_registry`` parameter must be specified for instances of this
request handler.
"""
def __init__(self, service_registry, *args, **kwargs):
self.service_registry = service_registry
self.client = SockJSClient(req_handler=self)
SockJSConnection.__init__(self, *args, **kwargs)
def on_open(self, info):
"""Executed when the connection is started."""
self.service_registry.client_connected(self.client)
def on_message(self, message):
"""Called when a message is received."""
self.service_registry.process_message_raw(self.client, message)
def on_close(self):
"""Called when the connection finalizes."""
self.service_registry.client_disconnected(self.client)
@classmethod
def get_routes(cls, service_registry, path=""):
"""Returns a list of routes matching this request handler,
suitable for use in :py:class:`tornado.web.Application`.
"""
# Since SockJS does not provide (AFAIK) a mechanism to pass arguments
# to the SockJSConnection constructor, we use an ad-hoc subclass
class ThisSockJSHandler(cls):
def __init__(self, *args, **kwargs):
cls.__init__(self, service_registry, *args, **kwargs)
return SockJSRouter(ThisSockJSHandler, path).urls
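# Illustrative wiring (sketch, not from the original module): the routes
# returned by get_routes() plug straight into a Tornado application.
# ``service_registry`` is assumed to be an already configured Snorky
# service registry instance.
#
#     import tornado.web
#     application = tornado.web.Application(
#         SnorkySockJSHandler.get_routes(service_registry, "/sockjs")
#     )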
| ntrrgc/snorky | snorky/request_handlers/sockjs.py | Python | mpl-2.0 | 2,177 |
tracebacks = [
"""Traceback (most recent call last):
File "/lib/python2.6/site-packages/django_some_really_cool_module-py2.6.egg/module/class.py", line 99, in get_response
response = callback(request, *callback_args, **callback_kwargs)
File "/mnt/services/airship/lib/python2.6/site-packages/Django-1.1.3-py2.6.egg/django/views/decorators/vary.py", line 21, in inner_func
response = func(*args, **kwargs)
File ""/lib/python2.6/site-packages/django_some_really_cool_module-py2.6.egg/module/__init__.py", line 117, in __call__
result = resource.Resource.__call__(self, request, *args, **kwargs)
File ""/lib/python2.6/site-packages/django_some_really_cool_module-py2.6.egg/module/class.py", line 21, in inner_func
response = func(*args, **kwargs)
File "/lib/python2.6/site-packages/django_some_really_cool_module-py2.6.egg/module/class.py", line 166, in __call__
result = meth(request, *args, **kwargs)
File "api/things.py", line 34, in wrap
return handler_method(self, request, *args, **kwargs)
File "api/other_things.py", line 122, in wrapped_f
return f(self, request, data, *args, **kwargs)
File "api/endpoints/something.py", line 173, in EXECUTE
something.do_something)
File "/lib/python2.6/site-packages/django_some_really_cool_module-py2.6.egg/module/class.py", line 194, in enqueue_task
thingy = queue_obj.open_connection(task_thingy)
File "/lib/python2.6/site-packages/django_some_other_really_cool_module-py2.6.egg/module/class.py", line 140, in open_connection
return queue.Connection(settings.host, settings.port)
File "/lib/python2.6/site-packages/another_thing-2.6.egg/module/class.py"
SocketError.wrap(self._socket.connect, (self.host, self.port))
File "/lib/python2.6/site-packages/django_some_really_cool_module-py2.6.egg/module/class.py", line 43, in do_something
raise SocketError(e)
SocketError: [Errno 110] Connection timed out""",
"""me.prettyprint.hector.api.exceptions.HTimedOutException: org.apache.thrift.transport.TTransportException: java.net.SocketTimeoutException: Read timed out
at me.prettyprint.cassandra.service.ExceptionsTranslatorImpl.translate(ExceptionsTranslatorImpl.java:35)
at me.prettyprint.cassandra.service.KeyspaceServiceImpl$5.execute(KeyspaceServiceImpl.java:223)
at me.prettyprint.cassandra.service.KeyspaceServiceImpl$5.execute(KeyspaceServiceImpl.java:206)
at me.prettyprint.cassandra.service.Operation.executeAndSetResult(Operation.java:101)
at me.prettyprint.cassandra.connection.HConnectionManager.operateWithFailover(HConnectionManager.java:224)
at me.prettyprint.cassandra.service.KeyspaceServiceImpl.operateWithFailover(KeyspaceServiceImpl.java:129)
at me.prettyprint.cassandra.service.KeyspaceServiceImpl.getSlice(KeyspaceServiceImpl.java:227)
at me.prettyprint.cassandra.model.thrift.ThriftSliceQuery$1.doInKeyspace(ThriftSliceQuery.java:53)
at me.prettyprint.cassandra.model.thrift.ThriftSliceQuery$1.doInKeyspace(ThriftSliceQuery.java:49)
at me.prettyprint.cassandra.model.KeyspaceOperationCallback.doInKeyspaceAndMeasure(KeyspaceOperationCallback.java:20)
at me.prettyprint.cassandra.model.ExecutingKeyspace.doExecute(ExecutingKeyspace.java:85)
at me.prettyprint.cassandra.model.thrift.ThriftSliceQuery.execute(ThriftSliceQuery.java:48)
at com.urbanairship.helib.UACassandra.get(UACassandra.java:82)
at com.urbanairship.helib.UACassandra.get(UACassandra.java:91)
at com.urbanairship.helib.models.APID.get(APID.java:52)
at com.urbanairship.helib.tasks.CheckRegistration.processRegistration(CheckRegistration.java:135)
at com.urbanairship.helib.tasks.CheckRegistration.run(CheckRegistration.java:83)
at com.urbanairship.octobot.TaskExecutor.execute(TaskExecutor.java:19)
at com.urbanairship.octobot.QueueConsumer.invokeTask(QueueConsumer.java:197)
at com.urbanairship.octobot.QueueConsumer.consumeFromBeanstalk(QueueConsumer.java:106)
at com.urbanairship.octobot.QueueConsumer.run(QueueConsumer.java:49)
at java.lang.Thread.run(Thread.java:662)
Caused by: org.apache.thrift.transport.TTransportException: java.net.SocketTimeoutException: Read timed out
at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:129)
at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
at org.apache.thrift.transport.TFramedTransport.readFrame(TFramedTransport.java:129)
at org.apache.thrift.transport.TFramedTransport.read(TFramedTransport.java:101)
at org.apache.thrift.transport.TTransport.readAll(TTransport.java:84)
at org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:378)
at org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:297)
at org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:204)
at org.apache.cassandra.thrift.Cassandra$Client.recv_get_slice(Cassandra.java:530)
at org.apache.cassandra.thrift.Cassandra$Client.get_slice(Cassandra.java:512)
at me.prettyprint.cassandra.service.KeyspaceServiceImpl$5.execute(KeyspaceServiceImpl.java:211)
... 20 more
Caused by: java.net.SocketTimeoutException: Read timed out
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.read(SocketInputStream.java:129)
at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:127)
... 30 more""",
]
| gmcquillan/firetower | firetower/tracebacks.py | Python | mit | 5,887 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Organization.name'
db.alter_column('organizations_organization', 'name', self.gf('django.db.models.fields.CharField')(max_length=200))
# Changing field 'Organization.slug'
db.alter_column('organizations_organization', 'slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=200, separator=u'-', unique=True, populate_from='name', overwrite=False))
def backwards(self, orm):
# Changing field 'Organization.name'
db.alter_column('organizations_organization', 'name', self.gf('django.db.models.fields.CharField')(max_length=100))
# Changing field 'Organization.slug'
db.alter_column('organizations_organization', 'slug', self.gf('django_extensions.db.fields.AutoSlugField')(populate_from='name', allow_duplicates=False, max_length=100, separator=u'-', unique=True, overwrite=False))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'{model}'.format(model=AUTH_USER_MODEL.lower()): {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'organizations.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '200', 'separator': "u'-'", 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['{model}']".format(model=AUTH_USER_MODEL), 'through': "orm['organizations.OrganizationUser']", 'symmetrical': 'False'})
},
'organizations.organizationowner': {
'Meta': {'object_name': 'OrganizationOwner'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'owner'", 'unique': 'True', 'to': "orm['organizations.Organization']"}),
'organization_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'owned_organization'", 'unique': 'True', 'to': "orm['organizations.OrganizationUser']"})
},
'organizations.organizationuser': {
'Meta': {'ordering': "['organization', 'user']", 'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationUser'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_users'", 'to': "orm['organizations.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_users'", 'to': "orm['{model}']".format(model=AUTH_USER_MODEL)})
}
}
complete_apps = ['organizations']
| aptivate/django-organizations | organizations/migrations/0004_auto__chg_field_organization_name__chg_field_organization_slug.py | Python | bsd-2-clause | 7,240 |
# -*- coding: utf-8 -*-
import re
# Match one or more labels (letters, digits or hyphens) followed by the
# literal suffix ".test.com", e.g. "aaa.test.com" or "bbb.bbb.test.com".
# The original pattern ended in a character class ("[test.com]"), which
# matches a single character from that set rather than the domain suffix.
korean_regex = r'^([a-zA-Z0-9-]+\.)+test\.com$'
kl_pattern = re.compile(korean_regex)
title_list = [
u'aaa.test.com',
u'1.test.com',
u'-.test.com',
u'bbb.bbb.test.com',
u'cc.cc.cc12.test.com',
u'dd.dd.ddtest.com/dd',
]
for title in title_list:
# print "input ------ %s" % (title)
title = title.encode('utf-8')
m = kl_pattern.match(title)
    if m:
print title
print m
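# With the corrected pattern every title above matches except
# 'dd.dd.ddtest.com/dd', which lacks a "." before "test.com" and carries a
# trailing path segment.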
| motobyus/moto | util/regularExpression/subdomain.py | Python | mit | 469 |
from django.conf import settings
from django.db import models
# Create your models here.
User = settings.AUTH_USER_MODEL
def upload_location(instance, filename):
#extension = filename.split(".")[1]
location = str(instance.user.username)
return "%s/%s" %(location, filename)
class Profile(models.Model):
user = models.OneToOneField(User)
location = models.CharField(max_length=120, null=True, blank=True)
picture = models.ImageField(upload_to=upload_location, null=True, blank=True)
def __unicode__(self): #__str__(self):
return self.user.username | pachinko/matchmaker_profile | src/profiles/models.py | Python | mit | 563 |
#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
| chrismattmann/drat | distribution/src/main/resources/bin/list-ghe-repos.py | Python | apache-2.0 | 1,171 |
"""
Support for Neato Connected Vacuums switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.neato/
"""
import logging
import requests
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.components.neato import NEATO_ROBOTS, NEATO_LOGIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['neato']
SWITCH_TYPE_CLEAN = 'clean'
SWITCH_TYPE_SCHEDULE = 'schedule'
SWITCH_TYPES = {
SWITCH_TYPE_CLEAN: ['Clean'],
SWITCH_TYPE_SCHEDULE: ['Schedule']
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Neato switches."""
dev = []
for robot in hass.data[NEATO_ROBOTS]:
for type_name in SWITCH_TYPES:
dev.append(NeatoConnectedSwitch(hass, robot, type_name))
_LOGGER.debug("Adding switches %s", dev)
add_devices(dev)
class NeatoConnectedSwitch(ToggleEntity):
"""Neato Connected Switches."""
def __init__(self, hass, robot, switch_type):
"""Initialize the Neato Connected switches."""
self.type = switch_type
self.robot = robot
self.neato = hass.data[NEATO_LOGIN]
self._robot_name = '{} {}'.format(
self.robot.name, SWITCH_TYPES[self.type][0])
try:
self._state = self.robot.state
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as ex:
_LOGGER.warning("Neato connection error: %s", ex)
self._state = None
self._schedule_state = None
self._clean_state = None
def update(self):
"""Update the states of Neato switches."""
_LOGGER.debug("Running switch update")
self.neato.update_robots()
try:
self._state = self.robot.state
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as ex:
_LOGGER.warning("Neato connection error: %s", ex)
self._state = None
return
_LOGGER.debug('self._state=%s', self._state)
        if self.type == SWITCH_TYPE_CLEAN:
            # Cleaning is reported while one of the cleaning actions (1-3)
            # is running and the robot state is busy (2); the explicit
            # parentheses fix the original or/and precedence, which applied
            # the state check to action 3 only.
            if (self.robot.state['action'] in (1, 2, 3) and
                    self.robot.state['state'] == 2):
                self._clean_state = STATE_ON
            else:
                self._clean_state = STATE_OFF
_LOGGER.debug("Clean state: %s", self._clean_state)
if self.type == SWITCH_TYPE_SCHEDULE:
_LOGGER.debug("State: %s", self._state)
if self.robot.schedule_enabled:
self._schedule_state = STATE_ON
else:
self._schedule_state = STATE_OFF
_LOGGER.debug("Shedule state: %s", self._schedule_state)
@property
def name(self):
"""Return the name of the switch."""
return self._robot_name
@property
def available(self):
"""Return True if entity is available."""
return self._state
    @property
    def is_on(self):
        """Return true if switch is on."""
        if self.type == SWITCH_TYPE_CLEAN:
            return self._clean_state == STATE_ON
        elif self.type == SWITCH_TYPE_SCHEDULE:
            return self._schedule_state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the switch on."""
if self.type == SWITCH_TYPE_CLEAN:
self.robot.start_cleaning()
elif self.type == SWITCH_TYPE_SCHEDULE:
self.robot.enable_schedule()
def turn_off(self, **kwargs):
"""Turn the switch off."""
if self.type == SWITCH_TYPE_CLEAN:
self.robot.pause_cleaning()
self.robot.send_to_base()
elif self.type == SWITCH_TYPE_SCHEDULE:
self.robot.disable_schedule()
| MungoRae/home-assistant | homeassistant/components/switch/neato.py | Python | apache-2.0 | 4,019 |
from django.db.models import Q
import sal.plugin
TITLES = {
'ok': 'Machines with Gatekeeper enabled',
'alert': 'Machines without Gatekeeper enabled',
'unknown': 'Machines with unknown Gatekeeper status'}
PLUGIN_Q = Q(pluginscriptsubmission__plugin='Gatekeeper')
SCRIPT_Q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_name='Gatekeeper')
class Gatekeeper(sal.plugin.Widget):
supported_os_families = [sal.plugin.OSFamilies.darwin]
def get_context(self, queryset, **kwargs):
queryset = queryset.filter(os_family='Darwin')
context = self.super_get_context(queryset, **kwargs)
context['ok'] = self._filter(queryset, 'ok').count()
context['alert'] = self._filter(queryset, 'alert').count()
context['unknown'] = queryset.count() - context['ok'] - context['alert']
return context
def filter(self, machines, data):
if data not in TITLES:
return None, None
return self._filter(machines, data), TITLES[data]
def _filter(self, machines, data):
machines = machines.filter(os_family='Darwin')
if data == 'ok':
machines = (
machines
.filter(PLUGIN_Q,
SCRIPT_Q,
pluginscriptsubmission__pluginscriptrow__pluginscript_data='Enabled'))
elif data == 'alert':
machines = (
machines
.filter(PLUGIN_Q,
SCRIPT_Q,
pluginscriptsubmission__pluginscriptrow__pluginscript_data='Disabled'))
elif data == 'unknown':
machines = (
machines
.exclude(pk__in=self._filter(machines, 'ok').values('pk'))
.exclude(pk__in=self._filter(machines, 'alert').values('pk')))
return machines
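    # Summary of the data values handled above (derived from this module):
    #   'ok'      -> Darwin machines whose Gatekeeper script row is 'Enabled'
    #   'alert'   -> Darwin machines whose Gatekeeper script row is 'Disabled'
    #   'unknown' -> Darwin machines matching neither of the above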
| salopensource/sal | server/plugins/gatekeeper/gatekeeper.py | Python | apache-2.0 | 1,854 |
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_gcp.actions import SetIamPolicy
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
@resources.register('organization')
class Organization(QueryResourceManager):
"""GCP resource: https://cloud.google.com/resource-manager/reference/rest/v1/organizations
"""
class resource_type(TypeInfo):
service = 'cloudresourcemanager'
version = 'v1'
component = 'organizations'
scope = 'global'
enum_spec = ('search', 'organizations[]', {'body': {}})
id = 'name'
@Organization.action_registry.register('set-iam-policy')
class OrganizationSetIamPolicy(SetIamPolicy):
"""
Overrides the base implementation to process Organization resources correctly.
"""
def _verb_arguments(self, resource):
verb_arguments = SetIamPolicy._verb_arguments(self, resource)
verb_arguments['body'] = {}
return verb_arguments
@resources.register('folder')
class Folder(QueryResourceManager):
"""GCP resource: https://cloud.google.com/resource-manager/reference/rest/v1/folders
"""
class resource_type(TypeInfo):
service = 'cloudresourcemanager'
version = 'v2'
component = 'folders'
scope = 'global'
enum_spec = ('list', 'folders', None)
id = 'name'
def get_resource_query(self):
if 'query' in self.data:
for child in self.data.get('query'):
if 'parent' in child:
return {'parent': child['parent']}
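# Illustrative policy snippet (syntax assumed for illustration) exercising the
# query handling above -- listing only the folders under a given parent:
#
#   policies:
#     - name: folders-under-org
#       resource: gcp.folder
#       query:
#         - parent: organizations/123456789012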
@resources.register('project')
class Project(QueryResourceManager):
"""GCP resource: https://cloud.google.com/compute/docs/reference/rest/v1/projects
"""
class resource_type(TypeInfo):
service = 'cloudresourcemanager'
version = 'v1'
component = 'projects'
scope = 'global'
enum_spec = ('list', 'projects', None)
id = 'projectId'
@Project.action_registry.register('set-iam-policy')
class ProjectSetIamPolicy(SetIamPolicy):
"""
Overrides the base implementation to process Project resources correctly.
"""
def _verb_arguments(self, resource):
verb_arguments = SetIamPolicy._verb_arguments(self, resource)
verb_arguments['body'] = {}
return verb_arguments
| Sutto/cloud-custodian | tools/c7n_gcp/c7n_gcp/resources/resourcemanager.py | Python | apache-2.0 | 2,882 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPytestMock(PythonPackage):
"""Thin-wrapper around the mock package for easier use with py.test"""
homepage = "https://github.com/pytest-dev/pytest-mock"
url = "https://pypi.io/packages/source/p/pytest-mock/pytest-mock-1.2.zip"
version('1.2', 'a7fa820f7bc71698660945836ff93c73')
extends('python', ignore=r'bin/*')
depends_on('py-setuptools', type='build')
depends_on('py-pytest@2.7:', type=('build', 'run'))
depends_on('py-mock', type=('build', 'run'))
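    # Illustrative usage (not part of the recipe): install this package with
    #   spack install py-pytest-mock@1.2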
| skosukhin/spack | var/spack/repos/builtin/packages/py-pytest-mock/package.py | Python | lgpl-2.1 | 1,766 |
import bpy
from math import sqrt, asin, sin, cos, atan, pi
from functools import reduce
from fashion_project.modules.draw.lines import is_one_of_lines, get_line_length, selection_is_contour
from fashion_project.modules.draw.lines.line_for_dart import LineForDart
from fashion_project.modules.draw.points.point import Point
from fashion_project.modules.draw.points import get_absolute_angle, get_angle, is_one_of_points
from fashion_project.modules.utils import get_point_abs_location, mouse, fp_expression
from fashion_project.modules.utils.mathlib import get_distance
from fashion_project.modules.draw.points.point_on_line import PointOnLine
from fashion_project.modules.operators.wires.proto import FP_WireProto
TARGET_LOCATION = (0.0,0.0,0.0)
DIF = 0
FLAG = True
IMAX = 0
IMIN = 0
_ANGLE = 1
''' One at a Time Lock '''
OAAT_LOCK = False
class FP_Modeling(bpy.types.Operator):
    '''
    Modeling tool.
    Requires a selected contour.
    '''
bl_idname = "fp.modeling"
bl_label = "FP Modeling"
@classmethod
def poll(cls, context):
return (not OAAT_LOCK)
def execute(self, context):
return {'FINISHED'}
class FP_UpdateContour(bpy.types.Operator, FP_WireProto):
'''
'''
bl_idname = 'fp.draw_update_contour'
bl_label = 'Draw update contour'
@classmethod
def poll(cls, context):
contour = False
line_for_dart = []
for obj in context.selected_objects:
if obj.fp_type == LineForDart.FP_TYPE:
line_for_dart += [obj]
if len(line_for_dart) < 2:
contour = False
else:
if line_for_dart[0].fp_deps[0] == line_for_dart[1].fp_deps[0]:
point_id = line_for_dart[0].fp_deps[0]
contour = True
elif line_for_dart[0].fp_deps[1] == line_for_dart[1].fp_deps[0]:
point_id = line_for_dart[0].fp_deps[1]
contour = True
elif line_for_dart[0].fp_deps[0] == line_for_dart[1].fp_deps[1]:
point_id = line_for_dart[0].fp_deps[0]
contour = True
elif line_for_dart[0].fp_deps[1] == line_for_dart[1].fp_deps[1]:
point_id = line_for_dart[0].fp_deps[1]
contour = True
else:
contour = False
if contour:
for i in range(len(line_for_dart)-2):
if line_for_dart[i+2].fp_deps[0] == point_id or line_for_dart[i+2].fp_deps[1] == point_id:
contour = True
else:
contour = False
break
return (
contour
and selection_is_contour()
)
def on_before_modal(self):
global OAAT_LOCK
self.TARGET_LOCATION = mouse.get_coords_location_3d()
OAAT_LOCK = True
def on_before_finish(self):
global OAAT_LOCK, FLAG
FLAG = True
OAAT_LOCK = False
def draw_callback(self, context):
global TARGET_LOCATION, _ANGLE, FLAG, IMAX, IMIN
mouse_coords_3 = mouse.get_coords_location_3d()
if FLAG:
            TARGET_LOCATION = mouse_coords_3
self.DIF = mouse_coords_3[0] - TARGET_LOCATION[0]
if mouse.get_event()[4] == 'TAB' and FLAG:
TARGET_LOCATION = mouse_coords_3
FLAG = False
TARGET_LOCATION = mouse_coords_3
_ANGLE = self.DIF * 5
dart_line = [line for line in context.selected_objects if line.fp_type == LineForDart.FP_TYPE]
if dart_line[0].fp_deps[0] == dart_line[1].fp_deps[0]:
for obj in bpy.data.objects:
if obj.fp_id == dart_line[0].fp_deps[0]:
center = obj
break
elif dart_line[0].fp_deps[0] == dart_line[1].fp_deps[1]:
for obj in bpy.data.objects:
if obj.fp_id == dart_line[0].fp_deps[0]:
center = obj
break
else:
for obj in bpy.data.objects:
if obj.fp_id == dart_line[0].fp_deps[1]:
center = obj
break
points_on_dart_line = []
for line in dart_line:
tmp_pts = [center]
for obj in bpy.data.objects:
if (obj.fp_id == line.fp_deps[0] or obj.fp_id == line.fp_deps[1]) and obj != center:
tmp_pts += [obj]
points_on_dart_line += [tmp_pts]
angle_dart_line = []
for points in points_on_dart_line:
angle_dart_line += [get_absolute_angle(get_point_abs_location(points[0]), get_point_abs_location(points[1]))]
all_limit_lines = []
for line in bpy.data.objects:
if (line.fp_deps[0] == center.fp_id or line.fp_deps[1] == center.fp_id) and line != dart_line[0] and line != dart_line[1]:
all_limit_lines += [line]
all_point_on_limit_lines = []
for line in all_limit_lines:
tmp_pts = [center]
for obj in bpy.data.objects:
if (obj.fp_id == line.fp_deps[0] or obj.fp_id == line.fp_deps[1]) and obj != center:
tmp_pts += [obj]
all_point_on_limit_lines += [tmp_pts]
angle_limit_line = []
for points in all_point_on_limit_lines:
angle_limit_line += [get_absolute_angle(get_point_abs_location(points[0]), get_point_abs_location(points[1]))]
angle_limit = []
limit_lines = []
tmp_min = 10000
tmp_max = -10000
if angle_dart_line[0] < angle_dart_line[1]:
for angle in angle_limit_line:
if angle > tmp_max and angle <= angle_dart_line[0]:
tmp_max = angle
if tmp_max == -10000:
angle_limit += [min(angle_limit_line)]
else:
angle_limit += [tmp_max]
for angle in angle_limit_line:
if angle < tmp_min and angle >= angle_dart_line[1]:
tmp_min = angle
if tmp_min == 10000:
angle_limit += [max(angle_limit_line)]
else:
angle_limit += [tmp_min]
else:
for angle in angle_limit_line:
if angle < tmp_min and angle >= angle_dart_line[0]:
tmp_min = angle
if tmp_min == 10000:
angle_limit += [max(angle_limit_line)]
else:
angle_limit += [tmp_min]
for angle in angle_limit_line:
if angle > tmp_max and angle <= angle_dart_line[1]:
tmp_max = angle
if tmp_max == -10000:
angle_limit += [min(angle_limit_line)]
else:
angle_limit += [tmp_max]
for angle in angle_limit:
for i in range(len(all_limit_lines)):
if angle_limit_line[i] == angle:
limit_lines += [all_limit_lines[i]]
break
dif_angle = [round(angle_dart_line[i]-angle_limit[i], 1) for i in range(len(angle_limit))]
if FLAG:
if dif_angle[0] > 0:
IMAX = 0
IMIN = 1
else:
IMAX = 1
IMIN = 0
for i in range(len(dif_angle)):
if i == IMAX:
print("_ANGLE", _ANGLE)
if _ANGLE < 0 and dif_angle[i] < 0.9:
_ANGLE = 0
if i == IMIN:
print("_ANGLE", _ANGLE)
if _ANGLE > 0 and dif_angle[i] > -0.9:
_ANGLE = 0
points_deps_for_update = []
points_parent_for_update = []
point_start_for_update = []
all_points_of_select = []
for obj in context.selected_objects:
count = 0
for point in bpy.data.objects:
if (obj.fp_deps[0] == point.fp_id or obj.fp_deps[1] == point.fp_id) and is_one_of_points(point) and all(point != p for p in all_points_of_select):
all_points_of_select += [point]
count += 1
if count == 2:
break
for point in all_points_of_select:
if point.fp_id == 1:
point_start_for_update += [point]
continue
for obj in bpy.data.objects:
if obj.parent == point and all(obj != p for p in all_points_of_select):
points_deps_for_update += [obj]
if point.parent == obj and all(obj != p for p in all_points_of_select) and point != center:
points_parent_for_update += [point]
if len(points_deps_for_update) > 0:
old_location = []
for point in points_deps_for_update:
old_location += [get_point_abs_location(point)]
if len(point_start_for_update) > 0:
R = round(get_distance(get_point_abs_location(point_start_for_update[0]), get_point_abs_location(center)), 3)
angle = round(get_absolute_angle(get_point_abs_location(center), get_point_abs_location(point_start_for_update[0])), 1)
location_center = get_point_abs_location(center)
angle += _ANGLE
new_location = []
new_location += [location_center[0] + R*cos(angle*pi/180)]
new_location += [location_center[1] + R*sin(angle*pi/180)]
distance = round(get_distance([0,0], new_location), 3)
new_angle = round(get_angle([0,0], new_location), 1)
point_start_for_update[0].fp_expression = str(distance)
point_start_for_update[0].fp_angle = new_angle
if center.parent == point_start_for_update[0]:
center.fp_angle += _ANGLE
elif len(points_parent_for_update) > 0:
R = round(get_distance(get_point_abs_location(points_parent_for_update[0]), get_point_abs_location(center)), 3)
angle = round(get_absolute_angle(get_point_abs_location(center), get_point_abs_location(points_parent_for_update[0])), 1)
location_center = get_point_abs_location(center)
angle += _ANGLE
new_location = []
new_location += [location_center[0] + R*cos(angle*pi/180)]
new_location += [location_center[1] + R*sin(angle*pi/180)]
distance = round(get_distance(get_point_abs_location(points_parent_for_update[0].parent), new_location), 3)
new_angle = round(get_angle(get_point_abs_location(points_parent_for_update[0].parent), new_location), 1)
points_parent_for_update[0].fp_expression = str(distance)
points_parent_for_update[0].fp_angle = new_angle
if center.parent == points_parent_for_update[0]:
center.fp_angle += _ANGLE
for point in all_points_of_select:
if point != center and all(point != obj for obj in point_start_for_update) and all(point != obj for obj in points_parent_for_update) and all(point != obj for obj in points_deps_for_update):
point.fp_angle += _ANGLE
if len(points_deps_for_update) > 0:
new_location_parent = []
for point in points_deps_for_update:
new_location_parent += [get_point_abs_location(point.parent)]
for i in range(len(points_deps_for_update)):
distance = round(get_distance(new_location_parent[i], old_location[i]), 3)
new_angle = round(get_angle(new_location_parent[i], old_location[i]), 1)
points_deps_for_update[i].fp_expression = str(distance)
points_deps_for_update[i].fp_angle = new_angle
class FP_ModalModeling(bpy.types.Macro):
bl_idname = "fp_modal_modeling"
bl_label = "Modal Modeling"
def define_macros():
FP_ModalModeling.define('FP_OT_draw_update_contour')
FP_ModalModeling.define('FP_OT_modeling')
clss = [
FP_UpdateContour,
FP_Modeling,
FP_ModalModeling
]
def register():
for cls in clss:
bpy.utils.register_class(cls)
define_macros()
def unregister():
for cls in clss:
bpy.utils.unregister_class(cls) | TriumphLLC/FashionProject | modules/operators/tools/details/modeling.py | Python | gpl-3.0 | 10,909 |
import keras
import pickle
from videotest import VideoTest
import sys
sys.path.append("..")
from ssd import SSD300 as SSD
input_shape = (300,300,3)
# Change this if you run with other classes than VOC
class_names = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"];
NUM_CLASSES = len(class_names)
model = SSD(input_shape, num_classes=NUM_CLASSES)
# Change this path if you want to use your own trained weights
model.load_weights('../weights_SSD300.hdf5')
vid_test = VideoTest(class_names, model, input_shape)
# To test on webcam 0, remove the parameter (or change it to another number
# to test on that webcam)
vid_test.run('path/to/your/video.mkv')
| rykov8/ssd_keras | testing_utils/videotest_example.py | Python | mit | 826 |
# -*- coding: utf-8 -*-
from orator.orm import Factory, Model, belongs_to, has_many
from orator.connections import SQLiteConnection
from orator.connectors import SQLiteConnector
from .. import OratorTestCase, mock
class FactoryTestCase(OratorTestCase):
@classmethod
def setUpClass(cls):
Model.set_connection_resolver(DatabaseConnectionResolver())
@classmethod
def tearDownClass(cls):
Model.unset_connection_resolver()
def connection(self):
return Model.get_connection_resolver().connection()
def schema(self):
return self.connection().get_schema_builder()
def setUp(self):
with self.schema().create("users") as table:
table.increments("id")
table.string("name").unique()
table.string("email").unique()
table.boolean("admin").default(True)
table.timestamps()
with self.schema().create("posts") as table:
table.increments("id")
table.integer("user_id")
table.string("title").unique()
table.text("content").unique()
table.timestamps()
table.foreign("user_id").references("id").on("users")
self.factory = Factory()
@self.factory.define(User)
def users_factory(faker):
return {"name": faker.name(), "email": faker.email(), "admin": False}
@self.factory.define(User, "admin")
def users_factory(faker):
attributes = self.factory.raw(User)
attributes.update({"admin": True})
return attributes
@self.factory.define(Post)
def posts_factory(faker):
return {"title": faker.sentence(), "content": faker.text()}
def tearDown(self):
self.schema().drop("posts")
self.schema().drop("users")
def test_factory_make(self):
user = self.factory.make(User)
self.assertIsInstance(user, User)
self.assertIsNotNone(user.name)
self.assertIsNotNone(user.email)
self.assertIsNone(User.where("name", user.name).first())
def test_factory_create(self):
user = self.factory.create(User)
self.assertIsInstance(user, User)
self.assertIsNotNone(user.name)
self.assertIsNotNone(user.email)
self.assertIsNotNone(User.where("name", user.name).first())
def test_factory_create_with_attributes(self):
user = self.factory.create(User, name="foo", email="foo@bar.com")
self.assertIsInstance(user, User)
self.assertEqual("foo", user.name)
self.assertEqual("foo@bar.com", user.email)
self.assertIsNotNone(User.where("name", user.name).first())
def test_factory_create_with_relations(self):
users = self.factory.build(User, 3)
users = users.create().each(lambda u: u.posts().save(self.factory.make(Post)))
self.assertEqual(3, len(users))
self.assertIsInstance(users[0], User)
self.assertEqual(3, User.count())
self.assertEqual(3, Post.count())
def test_factory_call(self):
user = self.factory(User).create()
self.assertFalse(user.admin)
users = self.factory(User, 3).create()
self.assertEqual(3, len(users))
self.assertFalse(users[0].admin)
admin = self.factory(User, "admin").create()
self.assertTrue(admin.admin)
admins = self.factory(User, "admin", 3).create()
self.assertEqual(3, len(admins))
self.assertTrue(admins[0].admin)
class User(Model):
__guarded__ = ["id"]
@has_many("user_id")
def posts(self):
return Post
class Post(Model):
__guarded__ = []
@belongs_to("user_id")
def user(self):
return User
class DatabaseConnectionResolver(object):
_connection = None
def connection(self, name=None):
if self._connection:
return self._connection
self._connection = SQLiteConnection(
SQLiteConnector().connect({"database": ":memory:"})
)
return self._connection
def get_default_connection(self):
return "default"
def set_default_connection(self, name):
pass
| sdispater/orator | tests/orm/test_factory.py | Python | mit | 4,197 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matthias Klumpp <mak@debian.org>
# Copyright (C) 2013 Wilson Xu <imwilsonxu@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version
# 3.0 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program.
import os
from flask import Blueprint, render_template, send_from_directory, abort
from flask import current_app as APP
from flask.ext.security import login_required, current_user
from .models import User
user = Blueprint('user', __name__, url_prefix='/user')
@user.route('/')
@login_required
def index():
if not current_user.is_authenticated():
abort(403)
return render_template('user/index.html', user=current_user)
@user.route('/<int:user_id>/profile')
def profile(user_id):
user = User.get_by_id(user_id)
return render_template('user/profile.html', user=user)
@user.route('/<int:user_id>/avatar/<path:filename>')
@login_required
def avatar(user_id, filename):
dir_path = os.path.join(APP.config['UPLOAD_FOLDER'], 'user_%s' % user_id)
return send_from_directory(dir_path, filename, as_attachment=True)
| limba-project/limba-hub | lihub/user/views.py | Python | gpl-3.0 | 1,576 |
#!/usr/bin/env python
# @package vfnow
# \author Ed Bueler and Constantine Khroulev, University of Alaska Fairbanks, USA
# \brief A script for verification of numerical schemes in PISM.
# \details It specifies a refinement path for each of Tests ABCDEFGIJKL and runs
# pismv accordingly.
# Copyright (C) 2007--2013, 2015, 2016 Ed Bueler and Constantine Khroulev
##
# Organizes the process of verifying PISM. It specifies standard refinement paths for each of the tests described in the user manual. It runs the tests, times them, and summarizes the numerical errors reported at the end.
##
# Examples:
# - \verbatim vfnow.py \endverbatim use one processor and do three levels of refinement; this command is equivalent to \verbatim vfnow.py -n 2 -l 2 -t CGIJ \endverbatim,
# - \verbatim vfnow.py -n 8 -l 5 -t J --prefix=bin/ --mpido='aprun -n' \endverbatim will use \verbatim aprun -n 8 bin/pismv \endverbatim as the command and do five levels (the maximum) of refinement only on test J,
# - \verbatim vfnow.py -n 2 -l 3 -t CEIJGKLO \endverbatim uses two processers (cores) and runs in about an hour,
# - \verbatim vfnow.py -n 40 -l 5 -t ABCDEFGIJKLO \endverbatim will use forty processors to do all possible verification as managed by \c vfnow.py; don't run this unless you have a big computer and you are prepared to wait.
# For a list of options do \verbatim test/vfnow.py --help \endverbatim.
# Timing information is given in the \c vfnow.py output so performance, including parallel performance, can be assessed along with accuracy.
import sys
import time
import commands
from numpy import array
# A class describing a refinement path and command-line options
# for a particular PISM verification test.
class PISMVerificationTest:
# max number of levels that will work with
N = 50
# one-letter test name
name = ""
# test description
test = ""
# description of the refinement path
path = ""
Mx = []
My = []
# 31 levels in the ice
Mz = [31] * N
# no bedrock by default
Mbz = [1] * N
# extra options (such as -y, -ys, -ssa_rtol)
opts = ""
executable = "pismv"
def build_command(self, exec_prefix, level):
M = zip(self.Mx, self.My, self.Mz, self.Mbz)
if level > len(M):
print "Test %s: Invalid refinement level: %d (only %d are available)" % (
self.name, level, len(M))
return ""
grid_options = "-Mx %d -My %d -Mz %d -Mbz %d" % M[level - 1]
return "%s%s -test %s %s %s" % (exec_prefix, self.executable, self.name, grid_options, self.opts)
def run_test(executable, name, level, extra_options="", debug=False):
try:
test = tests[name]
except:
print "Test %s not found." % name
return
if level == 1:
print "# ++++ TEST %s: verifying with %s exact solution ++++\n# %s" % (
test.name, test.test, test.path)
else:
extra_options += " -append"
command = test.build_command(executable, level) + " " + extra_options
if debug:
print '# L%d\n%s' % (level, command)
return
else:
print ' L%d: trying "%s"' % (level, command)
# run PISM:
try:
lasttime = time.time()
(status, output) = commands.getstatusoutput(command)
elapsetime = time.time() - lasttime
except KeyboardInterrupt:
sys.exit(2)
if status:
sys.exit(status)
print ' finished in %7.4f seconds; reported numerical errors as follows:' % elapsetime
# process the output:
position = output.find('NUMERICAL ERRORS')
if position >= 0:
report = output[position:output.find('NUM ERRORS DONE')]
endline = report.find('\n')
print ' ' + report[0:endline]
report = report[endline + 1:]
while (len(report) > 1) and (endline > 0):
endline = report.find('\n')
if endline == -1:
endline = len(report)
print ' #' + report[0:endline]
report = report[endline + 1:]
endline = report.find('\n')
if endline == -1:
endline = len(report)
print ' |' + report[0:endline]
report = report[endline + 1:]
else:
print " ERROR: can't find reported numerical error"
sys.exit(99)
def define_refinement_paths(KSPRTOL, SSARTOL):
# Define all the supported refinement paths:
tests = {}
# A
A = PISMVerificationTest()
A.name = "A"
A.test = "steady, marine margin isothermal SIA"
A.path = "(refine dx=53.33,40,26.67,20,13.33,km, dx=dy and Mx=My=31,41,61,81,121)"
A.Mx = [31, 41, 61, 81, 121]
A.My = A.Mx
A.opts = "-y 25000.0"
tests['A'] = A
# B
B = PISMVerificationTest()
B.name = "B"
B.test = "moving margin isothermal SIA (Halfar)"
B.path = "(refine dx=80,60,40,30,20,km, dx=dy and Mx=My=31,41,61,81,121)"
B.Mx = [31, 41, 61, 81, 121]
B.My = B.Mx
B.opts = "-ys 422.45 -y 25000.0"
tests['B'] = B
# C
C = PISMVerificationTest()
C.name = "C"
C.test = "non-zero accumulation moving margin isothermal SIA"
C.path = "(refine dx=50,33.33,25,20,16,km, dx=dy and Mx=My=41,61,81,101,121)"
C.Mx = [41, 61, 81, 101, 121]
C.My = C.Mx
C.opts = "-y 15208.0"
tests['C'] = C
# D
D = PISMVerificationTest()
D.name = "D"
D.test = "time-dependent isothermal SIA"
D.path = "(refine dx=50,33.33,25,20,16.67,km, dx=dy and Mx=My=41,61,81,101,121)"
D.Mx = [41, 61, 81, 101, 121]
D.My = D.Mx
D.opts = "-y 25000.0"
tests['D'] = D
# E
E = PISMVerificationTest()
E.name = "E"
E.test = "steady sliding marine margin isothermal SIA"
E.path = "(refine dx=53.33,40,26.67,20,13.33,km, dx=dy and Mx=My=31,41,61,81,121)"
E.Mx = [31, 41, 61, 81, 121]
E.My = E.Mx
E.opts = "-y 25000.0"
tests['E'] = E
# F
F = PISMVerificationTest()
F.name = "F"
F.test = "steady thermomechanically-coupled SIA"
F.path = "(refine dx=30,20,15,10,7.5,km, dx=dy, dz=66.67,44.44,33.33,22.22,16.67 m and Mx=My=Mz=61,91,121,181,241)"
F.Mx = [61, 91, 121, 181, 241]
F.My = F.Mx
F.Mz = F.Mx
F.opts = "-y 25000.0"
tests['F'] = F
# G
G = PISMVerificationTest()
G.name = "G"
G.test = "time-dependent thermomechanically-coupled SIA"
G.path = "(refine dx=30,20,15,10,7.5,km, dx=dy, dz=66.67,44.44,33.33,22.22,16.67 m and Mx=My=Mz=61,91,121,181,241)"
G.Mx = [61, 91, 121, 181, 241]
G.My = G.Mx
G.Mz = G.Mx
G.opts = "-y 25000.0"
tests['G'] = G
# H
H = PISMVerificationTest()
H.name = "H"
H.test = "moving margin, isostatic bed, isothermal SIA"
H.path = "(refine dx=80,60,40,30,20,km, dx=dy and Mx=My=31,41,61,81,121)"
H.Mx = [31, 41, 61, 81, 121]
H.My = H.Mx
H.opts = "-bed_def iso -y 60000.0"
tests['H'] = H
# I
I = PISMVerificationTest()
I.executable = "ssa_testi"
I.name = "I"
I.test = "plastic till ice stream (SSA)"
I.path = "(refine dy=5000,1250,312.5,78.13,19.53,m, My=49,193,769,3073,12289)"
I.Mx = [5] * 5
I.My = [49, 193, 769, 3073, 12289]
I.executable = "ssa_testi"
I.opts = "-ssa_method fd -ssa_rtol %1.e -ssafd_ksp_rtol %1.e" % (SSARTOL, KSPRTOL)
tests['I'] = I
# J
J = PISMVerificationTest()
J.executable = "ssa_testj"
J.name = "J"
J.test = "periodic ice shelf (linearized SSA)"
J.Mx = [49, 98, 196, 392, 784]
J.My = J.Mx
J.path = "(refine Mx={})".format(J.Mx)
J.Mz = [11] * 5
J.executable = "ssa_testj"
J.opts = "-ssa_method fd -ssafd_pc_type asm -ssafd_sub_pc_type lu -ssafd_ksp_rtol %1.e" % KSPRTOL
tests['J'] = J
# K
K = PISMVerificationTest()
K.name = "K"
K.test = "pure conduction problem in ice and bedrock"
K.path = "(refine dz=100,50,25,12.5,6.25,m, Mz=41,81,161,321,641)"
K.Mx = [8] * 5
K.My = K.Mx
K.Mz = array([41, 81, 161, 321, 641])
K.Mbz = (K.Mz - 1) / 4 + 1
K.opts = "-y 130000.0 -Lbz 1000 -z_spacing equal"
tests['K'] = K
# L
L = PISMVerificationTest()
L.name = "L"
L.test = "non-flat bed stead isothermal SIA"
L.path = "(refine dx=60,30,20,15,10,km, dx=dy and Mx=My=31,61,91,121,181)"
L.Mx = [31, 61, 91, 121, 181]
L.My = L.Mx
L.opts = "-y 25000.0"
tests['L'] = L
# M
M = PISMVerificationTest()
M.name = "M"
M.test = "annular ice shelf with a calving front (SSA)"
M.path = "(refine dx=50,25,16.666,12.5,8.333 km; dx=dy and My=31,61,91,121,181)"
M.Mx = [31, 61, 91, 121, 181]
M.My = M.Mx
M.Mz = [11] * 5
M.opts = "-ssa_rtol %1.e -ssafd_ksp_rtol %1.e" % (SSARTOL, KSPRTOL)
tests['M'] = M
# O
O = PISMVerificationTest()
O.name = "O"
O.test = "basal melt rate from conduction problem in ice and bedrock"
O.path = "(refine dz=100,50,25,12.5,6.25,m, Mz=41,81,161,321,641)"
O.Mx = [8] * 5
O.My = O.Mx
O.Mz = array([41, 81, 161, 321, 641])
O.Mbz = (O.Mz - 1) / 4 + 1
O.opts = "-z_spacing equal -zb_spacing equal -Lbz 1000 -y 1000 -no_mass"
tests['O'] = O
# test K (for a figure in the User's Manual)
K = PISMVerificationTest()
K.name = "K"
K.test = "pure conduction problem in ice and bedrock"
K.path = "(lots of levels)"
K.Mz = array([101, 121, 141, 161, 181, 201, 221, 241, 261, 281, 301, 321])
K.Mbz = (K.Mz - 1) / 4 + 1
K.Mx = [8] * len(K.Mz)
K.My = K.Mx
K.opts = "-y 130000.0 -Lbz 1000"
tests['K_manual'] = K
# test B (for a figure in the User's Manual)
B = PISMVerificationTest()
B.name = "B"
B.test = "moving margin isothermal SIA (Halfar)"
B.path = "(lots of levels)"
B.Mx = [31, 41, 51, 61, 71, 81, 91, 101, 111, 121]
B.My = B.Mx
B.Mz = [31] * len(B.Mx)
B.Mbz = [1] * len(B.Mx)
B.opts = "-ys 422.45 -y 25000.0"
tests['B_manual'] = B
# test G (for a figure in the User's Manual)
G = PISMVerificationTest()
G.name = "G"
G.test = "time-dependent thermomechanically-coupled SIA"
G.path = "(lots of levels)"
G.Mx = [61, 71, 81, 91, 101, 111, 121, 151, 181]
G.My = G.Mx
G.Mz = G.Mx
G.opts = "-y 25000.0"
tests['G_manual'] = G
# test I (for a figure in the User's Manual)
I = PISMVerificationTest()
I.executable = "ssa_testi"
I.name = "I"
I.test = "plastic till ice stream (SSA)"
I.path = "(lots of levels)"
I.My = [51, 101, 151, 201, 401, 601, 801, 1001, 1501, 2001, 2501, 3073]
I.Mx = [5] * len(I.My)
I.opts = "-ssa_method fd -ssa_rtol %1.e -ssafd_ksp_rtol %1.e" % (SSARTOL, KSPRTOL)
tests['I_manual'] = I
return tests
from argparse import ArgumentParser
parser = ArgumentParser()
parser.description = """PISM verification script"""
parser.add_argument("--eta", dest="eta", action="store_true",
help="to add '-eta' option to pismv call")
parser.add_argument("-l", dest="levels", type=int, default=2,
help="number of levels of verification; '-l 1' fast, '-l 5' slowest")
parser.add_argument("--mpido", dest="mpido", default="mpiexec -np",
help="specify MPI executable (e.g. 'mpirun -np' or 'aprun -n')")
parser.add_argument("-n", dest="n", type=int, default=2,
help="number of processors to use")
parser.add_argument("--prefix", dest="prefix", default="",
help="path prefix to pismv executable")
parser.add_argument("-r", dest="report_file", default="",
help="name of the NetCDF error report file")
parser.add_argument("-t", dest="tests", nargs="+",
help="verification tests to use (A,B,C,D,E,F,G,H,I,J,K,L,M,O); specify a space-separated list", default=['C', 'G', 'I', 'J'])
parser.add_argument("-u", dest="unequal", action="store_true",
help="use quadratic vertical grid spacing")
parser.add_argument("--debug", dest="debug", action="store_true",
help="just print commands in sequence (do not run pismv)")
parser.add_argument("--manual", dest="manual", action="store_true",
help="run tests necessary to produce figures in the User's Manual")
options = parser.parse_args()
extra_options = ""
if options.eta:
extra_options += " -eta"
if options.unequal:
extra_options += " -z_spacing quadratic"
if options.report_file:
extra_options += " -report_file %s" % options.report_file
predo = ""
if options.n > 1:
predo = "%s %d " % (options.mpido, options.n)
exec_prefix = predo + options.prefix
KSPRTOL = 1e-12 # for tests I, J, M
SSARTOL = 5e-7 # ditto
tests = define_refinement_paths(KSPRTOL, SSARTOL)
manual_tests = ["B_manual", "G_manual", "K_manual", "I_manual"]
if options.manual:
print "# VFNOW.PY: test(s) %s, using '%s...'\n" % (manual_tests, exec_prefix) + \
"# and ignoring options -t and -l"
for test in manual_tests:
N = len(tests[test].Mx)
for j in range(1, N + 1):
run_test(exec_prefix, test, j, extra_options,
options.debug)
else:
print "# VFNOW.PY: test(s) %s, %d refinement level(s), using '%s...'" % (
options.tests, options.levels, exec_prefix)
for test in options.tests:
for j in range(1, options.levels + 1):
run_test(exec_prefix, test, j, extra_options,
options.debug)
| talbrecht/pism_pik | test/vfnow.py | Python | gpl-3.0 | 13,390 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
Zona = {
"Zona_1":["La Boca","Calzada de Amador"],
"Zona_2":["San Felipe","Chorrillo","Santa Ana","Ancón"],
"Zona_3":["Calidonia","San Miguel","Albrook","Altos de Diablo"],
"Zona_4":["Punta Paitilla","Bella Vista","Universidad"] ,
"Zona_5":["Punta Pacífica","El Dorado","Las Sabanas","Bethania"] ,
"Zona_6":["Panamá Viejo","Río Abajo","Villa de las Fuentes","Los Libertadores"] ,
"Zona_7":["Costa del Este","Chanis","Auto Motor"]
}
def zona(lugar):
datos =[]
for zon in Zona:
# for x in Zona[zon]:
if lugar in Zona[zon]:
datos.append("zona")
datos.append(zon)
return datos
# lugar = "Auto Motor"
# zona(lugar)
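# Illustrative usage (sketch): look up the zone of a known location when the
# module is run directly.
if __name__ == '__main__':
    print zona("Auto Motor")  # -> ['zona', 'Zona_7']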
| mdmirabal/Parcial2-Prog3 | zona.py | Python | mit | 683 |
EMAIL_PORT = 1025
ROOT_URLCONF = 'django_mailer.apptest.urls'
SECRET_KEY = 'yo secret yo'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'django_mailer.sqlite',
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_mailer',
'django_mailer.testapp'
)
| maykinmedia/django-mailer-2 | django_mailer/testapp/settings.py | Python | mit | 354 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-08 14:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0052_baseexporttask_2'),
]
operations = [
migrations.AlterField(
model_name='contacturn',
name='urn',
field=models.CharField(choices=[('tel', 'Phone number'), ('facebook', 'Facebook identifier'), ('twitter', 'Twitter handle'), ('viber', 'Viber identifier'), ('line', 'LINE identifier'), ('telegram', 'Telegram identifier'), ('mailto', 'Email address'), ('ext', 'External identifier'), ('fcm', 'Firebase Cloud Messaging identifier')], help_text='The Universal Resource Name as a string. ex: tel:+250788383383', max_length=255),
),
]
| onaio/rapidpro | temba/contacts/migrations/0053_auto_20170208_1450.py | Python | agpl-3.0 | 837 |
import sys
import pytest
import yaml
from distributed import Client
from distributed.scheduler import COMPILED
from distributed.utils_test import gen_test, popen
@pytest.mark.skipif(COMPILED, reason="Fails with cythonized scheduler")
@gen_test(timeout=120)
async def test_text():
with popen(
[
sys.executable,
"-m",
"distributed.cli.dask_spec",
"--spec",
'{"cls": "dask.distributed.Scheduler", "opts": {"port": 9373}}',
]
):
with popen(
[
sys.executable,
"-m",
"distributed.cli.dask_spec",
"tcp://localhost:9373",
"--spec",
'{"cls": "dask.distributed.Worker", "opts": {"nanny": false, "nthreads": 3, "name": "foo"}}',
]
):
async with Client("tcp://localhost:9373", asynchronous=True) as client:
await client.wait_for_workers(1)
info = await client.scheduler.identity()
[w] = info["workers"].values()
assert w["name"] == "foo"
assert w["nthreads"] == 3
@pytest.mark.skipif(COMPILED, reason="Fails with cythonized scheduler")
@pytest.mark.asyncio
async def test_file(cleanup, tmp_path):
fn = str(tmp_path / "foo.yaml")
with open(fn, "w") as f:
yaml.dump(
{
"cls": "dask.distributed.Worker",
"opts": {"nanny": False, "nthreads": 3, "name": "foo"},
},
f,
)
with popen(["dask-scheduler", "--port", "9373", "--no-dashboard"]):
with popen(
[
sys.executable,
"-m",
"distributed.cli.dask_spec",
"tcp://localhost:9373",
"--spec-file",
fn,
]
):
async with Client("tcp://localhost:9373", asynchronous=True) as client:
await client.wait_for_workers(1)
info = await client.scheduler.identity()
[w] = info["workers"].values()
assert w["name"] == "foo"
assert w["nthreads"] == 3
def test_errors():
with popen(
[
sys.executable,
"-m",
"distributed.cli.dask_spec",
"--spec",
'{"foo": "bar"}',
"--spec-file",
"foo.yaml",
]
) as proc:
line = proc.stdout.readline().decode()
assert "exactly one" in line
assert "--spec" in line and "--spec-file" in line
with popen([sys.executable, "-m", "distributed.cli.dask_spec"]) as proc:
line = proc.stdout.readline().decode()
assert "exactly one" in line
assert "--spec" in line and "--spec-file" in line
| dask/distributed | distributed/cli/tests/test_dask_spec.py | Python | bsd-3-clause | 2,830 |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Raul Melo
# Written by Raul Melo <raulmelo@gmail.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swdepot
short_description: Manage packages with swdepot package manager (HP-UX)
description:
- Will install, upgrade and remove packages with swdepot package manager (HP-UX)
version_added: "1.4"
notes: []
author: "Raul Melo (@melodous)"
options:
name:
description:
- package name.
required: true
default: null
choices: []
aliases: []
version_added: 1.4
state:
description:
- whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
default: null
choices: [ 'present', 'latest', 'absent']
aliases: []
version_added: 1.4
depot:
description:
- The source repository from which install or upgrade a package.
required: false
default: null
choices: []
aliases: []
version_added: 1.4
'''
EXAMPLES = '''
- swdepot:
name: unzip-6.0
    state: present
depot: 'repository:/path'
- swdepot:
name: unzip
state: latest
depot: 'repository:/path'
- swdepot:
name: unzip
state: absent
'''
import re
import pipes
def compare_package(version1, version2):
""" Compare version packages.
Return values:
-1 first minor
0 equal
1 first greater """
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
normalized_version1 = normalize(version1)
normalized_version2 = normalize(version2)
if normalized_version1 == normalized_version2:
rc = 0
elif normalized_version1 < normalized_version2:
rc = -1
else:
rc = 1
return rc
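# Illustrative results (examples added for clarity, not part of the original module):
#   compare_package('1.0', '1.0.0') ->  0   (trailing ".0" groups are stripped)
#   compare_package('1.2', '1.10')  -> -1   (numeric, not lexicographic, ordering)
#   compare_package('2.0', '1.9')   ->  1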
def query_package(module, name, depot=None):
""" Returns whether a package is installed or not and version. """
cmd_list = '/usr/sbin/swlist -a revision -l product'
if depot:
rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)),
use_unsafe_shell=True)
else:
rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
else:
version = None
return rc, version
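# Note on query_package: the swlist output is piped through grep, so a zero
# exit status means a matching package line was found; the revision is then
# taken from the second whitespace-separated column of that line.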
def remove_package(module, name):
""" Uninstall package if installed. """
cmd_remove = '/usr/sbin/swremove'
rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
if rc == 0:
return rc, stdout
else:
return rc, stderr
def install_package(module, depot, name):
""" Install package if not already installed """
cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
if rc == 0:
return rc, stdout
else:
return rc, stderr
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg'], required=True),
state = dict(choices=['present', 'absent', 'latest'], required=True),
depot = dict(default=None, required=False)
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
depot = module.params['depot']
changed = False
msg = "No changed"
rc = 0
if ( state == 'present' or state == 'latest' ) and depot is None:
output = "depot parameter is mandatory in present or latest task"
module.fail_json(name=name, msg=output, rc=rc)
#Check local version
rc, version_installed = query_package(module, name)
if not rc:
installed = True
msg = "Already installed"
else:
installed = False
if ( state == 'present' or state == 'latest' ) and installed is False:
if module.check_mode:
module.exit_json(changed=True)
rc, output = install_package(module, depot, name)
if not rc:
changed = True
msg = "Package installed"
else:
module.fail_json(name=name, msg=output, rc=rc)
elif state == 'latest' and installed is True:
#Check depot version
rc, version_depot = query_package(module, name, depot)
if not rc:
if compare_package(version_installed,version_depot) == -1:
if module.check_mode:
module.exit_json(changed=True)
#Install new version
rc, output = install_package(module, depot, name)
if not rc:
msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
changed = True
else:
module.fail_json(name=name, msg=output, rc=rc)
else:
output = "Software package not in repository " + depot
module.fail_json(name=name, msg=output, rc=rc)
elif state == 'absent' and installed is True:
if module.check_mode:
module.exit_json(changed=True)
rc, output = remove_package(module, name)
if not rc:
changed = True
msg = "Package removed"
else:
module.fail_json(name=name, msg=output, rc=rc)
if module.check_mode:
module.exit_json(changed=False)
module.exit_json(changed=changed, name=name, state=state, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| tsdmgz/ansible | lib/ansible/modules/packaging/os/swdepot.py | Python | gpl-3.0 | 6,161 |
"""
PaStA - Patch Stack Analysis
Copyright (c) OTH Regensburg, 2016-2019
Author:
Ralf Ramsauer <ralf.ramsauer@oth-regensburg.de>
This work is licensed under the terms of the GNU GPL, version 2. See
the COPYING file in the top-level directory.
"""
from logging import getLogger
log = getLogger(__name__[-15:])
class Clustering:
SEPARATOR = '=>'
def __init__(self):
self.clusters = list()
self.lookup = dict()
self.upstream = set()
def optimize(self):
# get optimized list by filtering orphaned elements
self.clusters = list(filter(None, self.clusters))
# reset lookup table
self.lookup = dict()
# recreate the lookup dictionary
for i, keylist in enumerate(self.clusters):
for key in keylist:
self.lookup[key] = i
def ripup_cluster(self, representative):
"""
Rips up a cluster. This removes all connections of the elements of the
cluster and reinserts them as single-element clusters
:return: Elements of the former cluster
"""
id = self.lookup[representative]
elems = self.clusters.pop(id)
for elem in elems:
self.lookup.pop(elem)
for elem in elems:
self.insert_element(elem)
return elems
def is_related(self, *elems):
"""
Returns True, if _all_ elements are in the same cluster
"""
ids = {self.lookup.get(x, None) for x in elems}
if None in ids:
return False
return len(ids) == 1
def remove_element(self, elem):
"""
Remove a single element from its cluster
"""
self.upstream.discard(elem)
id = self.lookup.pop(elem)
self.clusters[id].remove(elem)
def insert_element(self, elem):
"""
Assigns elem to a new cluster. Returns the new ID of the cluster. If
elem is already existent and assigned to a cluster, do nothing but
return the ID of the cluster.
"""
if elem in self.lookup:
return self.lookup[elem]
self.clusters.append(set([elem]))
id = len(self.clusters) - 1
self.lookup[elem] = id
return id
def _merge_clusters(self, *ids):
new_class = set()
new_id = min(ids)
for id in ids:
for key in self.clusters[id]:
self.lookup[key] = new_id
new_class |= self.clusters[id]
self.clusters[id] = set()
self.clusters[new_id] = new_class
# truncate empty trailing list elements
while not self.clusters[-1]:
self.clusters.pop()
return new_id
def insert(self, *elems):
"""
Create a new cluster with elements elems.
"""
if len(elems) == 0:
return
ids = [self.insert_element(elem) for elem in elems]
# check if all elements are already in the same class
if len(set(ids)) == 1:
return ids[0]
return self._merge_clusters(*ids)
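    # Illustrative usage (hypothetical element names):
    #   c = Clustering()
    #   c.insert('a', 'b')       # creates cluster {a, b}
    #   c.insert('b', 'c')       # merges into {a, b, c}
    #   c.is_related('a', 'c')   # -> True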
def get_cluster_id(self, key):
return self.lookup[key]
def mark_upstream(self, key, is_upstream=True):
if is_upstream is True:
self.upstream.add(key)
else:
self.upstream.discard(key)
def get_all_elements(self):
"""
        Returns all elements as a set. This includes both upstream and
        downstream entries.
"""
return set(self.lookup.keys())
def get_cluster(self, elem):
"""
Given elem, this function returns all elements of the cluster as a set.
        This includes both upstream and downstream.
"""
if elem not in self:
return None
id = self.get_cluster_id(elem)
return self.clusters[id].copy()
def get_upstream(self, elem=None):
"""
Returns all upstream entries that are related to elem. If elem is not
specified, this function returns all upstream patches.
"""
if elem:
return self.upstream.intersection(self.clusters[self.lookup[elem]])
return self.upstream
def get_downstream(self, elem=None):
"""
Returns all downstream entries that are related to elem. If elem is not
specified, this function returns all downstream patches.
"""
if elem:
return self.clusters[self.lookup[elem]] - self.upstream
return set(self.lookup.keys()) - self.upstream
def __getitem__(self, item):
return self.get_cluster(item)
def __len__(self):
return len(self.clusters)
def __str__(self):
retval = str()
cluster_list = [(sorted(downstream), sorted(upstream)) for
downstream, upstream in self.iter_split()]
downstream_list = sorted(filter(lambda x: len(x[0]), cluster_list))
upstream_list = sorted(
[x[1] for x in
filter(lambda x: len(x[0]) == 0, cluster_list)])
for downstreams, upstreams in downstream_list:
# Convert to string representation. In this way, we can also handle other
# types than pure strings, like integers.
downstreams = [str(x) for x in downstreams]
upstreams = [str(x) for x in upstreams]
retval += ' '.join(downstreams)
if len(upstreams):
retval += ' %s %s' % (Clustering.SEPARATOR, ' '.join(upstreams))
retval += '\n'
for upstreams in upstream_list:
retval += '%s %s\n' % (Clustering.SEPARATOR, ' '.join(upstreams))
return retval
def get_representative_system(self, compare_function):
"""
Return a complete representative system of the equivalence class. Only
downstream entries are considered.
:param compare_function: a function that compares two elements of an
equivalence class
"""
retval = set()
        for cluster, _ in self.iter_split():
            cluster = list(cluster)
            if not cluster:
                continue
if len(cluster) == 1:
retval.add(cluster[0])
continue
rep = cluster[0]
for element in cluster[1:]:
if compare_function(element, rep):
rep = element
retval.add(rep)
return retval
def __iter__(self):
# iterate over all classes, and return all items
for elem in self.clusters:
if not elem:
continue
yield elem
def iter_split(self):
"""
Iterate over all clusters. Per cluster, return a tuple of
(downstream, upstream) patches
"""
for cluster in self.clusters:
downstream = cluster - self.upstream
upstream = cluster & self.upstream
if len(downstream) == 0 and len(upstream) == 0:
continue
yield downstream, upstream
def __contains__(self, item):
return item in self.lookup
def to_file(self, filename):
self.optimize()
with open(filename, 'w') as f:
f.write(str(self))
@staticmethod
def from_file(filename, must_exist=False):
def split_elements(elems):
return list(filter(None, elems.split(' ')))
retval = Clustering()
try:
with open(filename, 'r') as f:
content = f.read()
except FileNotFoundError:
log.warning('Clustering not found: %s' % filename)
if must_exist:
raise
return retval
if not (content and len(content)):
return retval
content = list(filter(None, content.splitlines()))
for line in content:
line = line.split(Clustering.SEPARATOR)
# Append empty upstream list, if not present
if len(line) == 1:
line.append('')
downstream, upstream = split_elements(line[0]), \
split_elements(line[1])
retval.insert(*(downstream + upstream))
for element in upstream:
retval.mark_upstream(element)
return retval
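# Illustrative on-disk format handled by to_file()/from_file(); the hashes
# below are made up:
#   abc123 def456 => 1a2b3c     (two downstream patches, one upstream)
#   fee1dead                    (downstream-only cluster)
#   => cafebabe beefcafe        (upstream-only cluster)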
| lfd/PaStA | pypasta/Clustering.py | Python | gpl-2.0 | 8,421 |
# Generated by Django 2.2.24 on 2021-07-08 05:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cyklomapa', '0008_czechiaaccidents_priciny_nehody'),
]
operations = [
migrations.AddField(
model_name='czechiaaccidents',
name='lokalita',
field=models.CharField(max_length=20, null=True),
),
]
| auto-mat/prahounakole | apps/cyklomapa/migrations/0009_czechiaaccidents_lokalita.py | Python | gpl-3.0 | 423 |
# Scrapy settings for the quickscan_spider project. For available settings see:
# http://doc.scrapy.org/en/latest/topics/settings.html
#
# Uncomment below in order to disallow redirects
#REDIRECT_ENABLED = False
# Uncomment this to lessen the spider's output
#LOG_LEVEL = 'INFO'
BOT_NAME = 'quickscan_spider'
SPIDER_MODULES = ['quickscan.spiders']
NEWSPIDER_MODULE = 'quickscan.spiders'
# For adding javascript rendering
#DOWNLOAD_HANDLERS = {'http':'quickscan.scrapyjs.dhandler.WebkitDownloadHandler',
# 'https': 'quickscan.scrapyjs.dhandler.WebkitDownloadHandler'}
# 100 (first): Make sure there's no duplicate requests that have some value changed
# 200 (second): Make sure there's a random working User-Agent header set if that value's not injected with the test string
DOWNLOADER_MIDDLEWARES = {'quickscan.middlewares.RandomUserAgentMiddleware': 100,
'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 200}
COOKIES_ENABLED = True
#COOKIES_DEBUG = True
# Prevent duplicate link crawling
# Bloom filters are way more memory efficient than just a hash lookup
DUPEFILTER_CLASS = 'quickscan.bloomfilters.BloomURLDupeFilter'
#DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
ITEM_PIPELINES = {'quickscan.pipelines.Resp_analyzer':100}
#FEED_FORMAT = 'csv'
#FEED_URI = 'example.txt'
CONCURRENT_REQUESTS = 30
DOWNLOAD_DELAY = 0
| DanMcInerney/quickscan | quickscan/settings.py | Python | gpl-2.0 | 1,328 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2001 - 2020 Massimo Gerardi all rights reserved.
#
# Author: Massimo Gerardi massimo.gerardi@gmail.com
#
# Copyright (c) 2020 Qsistemi.com. All rights reserved.
#
# Viale Giorgio Ribotta, 11 (Roma)
# 00144 Roma (RM) - Italy
# Phone: (+39) 06.87.163
#
#
# See the COPYING file for the software licensing terms.
#
# www.qsistemi.com - italy@qsistemi.com
import wx
import wx.lib.buttons as buttons #import *
import string
from cfg import *
import cfg
def create(parent,cnt):
return Ordini(parent,cnt)
class Ordini(wx.ScrolledWindow):
def __init__(self, prnt, cnt):
self.win=wx.ScrolledWindow.__init__(self, parent=prnt, id=-1,size = wx.DefaultSize)
        self.SetScrollbars(1, 1, 100, 100)
        self.FitInside()
png = wx.Image((cfg.path_img + "cerca19x19.png"),
wx.BITMAP_TYPE_PNG).ConvertToBitmap()
self.ttl = cnt[0]
self.val_numord = 0
self.tipoord = cnt[1]
self.tcpart = "C"
if self.tipoord=='OF': self.tcpart = "F"
self.tblart = "articoli"
self.ttlanag = _('Anagrafica Clienti')
self.ttlart = _('Anagrafica Articoli')
self.ttldest = _('Anagrafica Spedizione')
self.rec = cnt[2]
self.AggMenu = cnt[3]
self.IDMENU = cnt[4]
self.voktestata = 0
#self.font = self.GetFont()
self.color = self.GetBackgroundColour()
Nid = wx.NewId()
self.__MDI__ = wx.GetApp().GetPhasisMdi()
self.font=self.__MDI__.font
self.SetFont(self.font)
self.CnAz = self.__MDI__.GetConnAZ()
self.annoc = self.__MDI__.GetAnnoC()
self.datacon = self.__MDI__.GetDataC()
self.dzDatiAzienda = self.__MDI__.dzDatiAzienda
self.pnl = wx.Panel(id = wx.NewId(), name = '',
parent = self, pos = wx.Point(0, 0), size = wx.DLG_SZE(self,680/2,420/2), #size = wx.Size(680, 420),
style = wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL)
self.pnl.SetFont(self.font)
self.ntbk = wx.Notebook(id = wx.NewId(), name = 'notebook',
parent = self.pnl, pos = wx.DLG_PNT(self.pnl, 5/2,5/2), #pos = wx.Point(5, 5),
size = wx.DLG_SZE(self.pnl,cfg.NTBKHTUTTI/2,cfg.NTBKVTUTTI/2),style = 0)
#size = wx.Size(cfg.NTBKH,cfg.NTBKV), style = 0)
self.ntbk.SetFont(self.font)
self.pnl1 = wx.Panel(id = wx.NewId(), name = 'panel1',
parent = self.ntbk, pos = wx.Point(0, 0))
self.pnl1.SetFont(self.font)
self.pnl2 = wx.Panel(id = wx.NewId(), name = 'panel2',
parent = self.ntbk, pos = wx.Point(0, 0))
self.pnl2.SetFont(self.font)
self.pnl3 = wx.Panel(id = wx.NewId(), name = 'panel3',
parent = self.ntbk, pos = wx.Point(0, 0))
self.pnl3.SetFont(self.font)
self.ntbk.AddPage(imageId = -1, page = self.pnl1,
select = True, text = _(' Testata')+' (1) ')
self.ntbk.AddPage(imageId = -1, page = self.pnl2,
select = False, text = _(' Corpo')+' (2) ')
self.ntbk.AddPage(imageId = -1, page = self.pnl3,
select = False, text = _(' Calce')+' (3) ')
#self.pnl.SetFont(self.font)
#self.pnl1.SetFont(self.font)
#self.pnl2.SetFont(self.font)
#self.pnl3.SetFont(self.font)
#self.ntbk.SetFont(self.font)
wx.StaticText(self.pnl1, -1, _("Doc. :"), wx.DLG_PNT(self.pnl1, 5,7))
self.TIPO_ORD = wx.ComboBox(self.pnl1, Nid,"",
wx.DLG_PNT(self.pnl1, 27,5),
wx.DLG_SZE(self.pnl1, 90,-1),[],wx.CB_DROPDOWN | wx.CB_SORT )
self.vTIPO_ORD = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 275,90))
self.lord = wx.StaticText(self.pnl1, -1, _("Num. :"),
wx.DLG_PNT(self.pnl1, 120,7))
self.anno = wx.ComboBox(self.pnl1, Nid, self.annoc,
wx.DLG_PNT(self.pnl1, 145,5),
wx.DLG_SZE(self.pnl1, 35,-1),[self.annoc], wx.CB_DROPDOWN | wx.CB_SORT )
wx.StaticText(self.pnl1, -1, "/", wx.DLG_PNT(self.pnl1, 182,7))
self.num_ord = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 185,5),
wx.DLG_SZE(self.pnl1, 40,cfg.DIMFONTDEFAULT),
wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER )
self.cnum_ord = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 226,5),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
wx.StaticText(self.pnl1, -1, _("Data :"),
wx.DLG_PNT(self.pnl1, 243,7))
self.data_ord = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 264,5),
wx.DLG_SZE(self.pnl1, 50,cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER)
self.cdataord = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 315,5),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.vdata_ord = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 280, 125),
wx.DLG_SZE(self.pnl1, 50,cfg.DIMFONTDEFAULT))
wx.StaticText(self.pnl1, -1,_(" Vs. Ordine :"), wx.DLG_PNT(self.pnl1, 5,20))
self.vs_ord = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 5,30),
wx.DLG_SZE(self.pnl1, 50,cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER)
wx.StaticText(self.pnl1, -1, _("Data :"),
wx.DLG_PNT(self.pnl1, 60,20))
self.vs_data = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 60, 30),
wx.DLG_SZE(self.pnl1, 50,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.cvsdata = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 115,30),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.sbox_cf = wx.StaticBox(self.pnl1, Nid, _(' Cliente '),
wx.DLG_PNT(self.pnl1, 5,45), wx.DLG_SZE(self.pnl1, 265,65))
self.lcodcf = wx.StaticText(self.pnl1, -1, _("Codice"),
wx.DLG_PNT(self.pnl1, 10,55))
self.codcf = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 10, 65),
wx.DLG_SZE(self.pnl1, 40,cfg.DIMFONTDEFAULT) ,wx.TE_PROCESS_ENTER)
self.codcf1 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 10, 65),
wx.DLG_SZE(self.pnl1, 40,cfg.DIMFONTDEFAULT))
self.ccodcf = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 55,65),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.lragsoc = wx.StaticText(self.pnl1, -1, _("Cessionario :"),
wx.DLG_PNT(self.pnl1, 75,55))
self.ragsoc1 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 75,65),
wx.DLG_SZE(self.pnl1, 120,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.cragsoc1 = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 200,65),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.ragsoc3 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 75,65),
wx.DLG_SZE(self.pnl1, 120,cfg.DIMFONTDEFAULT))
self.cragsoc3 = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 200,65),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.ragsoc2 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 148, 65),
wx.DLG_SZE(self.pnl1, 100,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.ragsoc4 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 148, 65),
wx.DLG_SZE(self.pnl1, 100,cfg.DIMFONTDEFAULT))
self.lindiriz = wx.StaticText(self.pnl1, -1, _("Indirizzo :"),
wx.DLG_PNT(self.pnl1, 10,82))
self.indiriz = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45,80),
wx.DLG_SZE(self.pnl1, 170,cfg.DIMFONTDEFAULT))
self.cdest = wx.Button(self.pnl1, Nid, _(' Cliente '),
wx.DLG_PNT(self.pnl1, 218,80),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
self.rdest1 = wx.Button(self.pnl1, Nid, _("Annulla"),
wx.DLG_PNT(self.pnl1, 218,65),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
wx.StaticText(self.pnl1, -1, _("Citta` :"),
wx.DLG_PNT(self.pnl1, 10,97))
self.zona = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45,95),
wx.DLG_SZE(self.pnl1, 100,cfg.DIMFONTDEFAULT))
wx.StaticText(self.pnl1, -1, _("CAP :"), wx.DLG_PNT(self.pnl1, 150,97))
self.cap = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 180, 95),
wx.DLG_SZE(self.pnl1, 35,cfg.DIMFONTDEFAULT))
wx.StaticText(self.pnl1, -1, _("PR :"), wx.DLG_PNT(self.pnl1, 225,97))
self.pr = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 243, 95),
wx.DLG_SZE(self.pnl1, 20,cfg.DIMFONTDEFAULT))
self.indiriz1 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45,80),
wx.DLG_SZE(self.pnl1, 170,cfg.DIMFONTDEFAULT))
self.zona1 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45,95),
wx.DLG_SZE(self.pnl1, 100,cfg.DIMFONTDEFAULT))
self.cap1 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 180, 95),
wx.DLG_SZE(self.pnl1, 35,cfg.DIMFONTDEFAULT))
self.pr1 = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 243, 95),
wx.DLG_SZE(self.pnl1, 20,cfg.DIMFONTDEFAULT))
self.vDIVvend = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 280,130))
wx.StaticText(self.pnl1, -1, _("Vs Rifer. :"),
wx.DLG_PNT(self.pnl1, 10,114))
self.vsrif = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45, 112),
wx.DLG_SZE(self.pnl1, 85,cfg.DIMFONTDEFAULT), wx.TE_PROCESS_ENTER)
wx.StaticText(self.pnl1, -1, _("Ns Rifer. :"),
wx.DLG_PNT(self.pnl1, 143,114))
self.nsrif = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 180, 112),
wx.DLG_SZE(self.pnl1, 85,cfg.DIMFONTDEFAULT), wx.TE_PROCESS_ENTER)
self.lnote = wx.StaticText(self.pnl1, -1, _("Note :"),
wx.DLG_PNT(self.pnl1, 10,129))
self.note = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45,127),
wx.DLG_SZE(self.pnl1, 220,cfg.DIMFONTDEFAULT), wx.TE_PROCESS_ENTER)
self.stt_ord1 = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 285,137))
self.lcodage = wx.StaticText(self.pnl1, -1, _("Agente :"),
wx.DLG_PNT(self.pnl1, 10,144))
self.codage = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 45,142),
wx.DLG_SZE(self.pnl1, 45,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.ccodage = wx.BitmapButton(self.pnl1, -1, png,#wx.Button(self.pnl1, Nid, "...",
wx.DLG_PNT(self.pnl1, 93,142),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.ragsoc1age = wx.TextCtrl(self.pnl1, Nid, "",
wx.DLG_PNT(self.pnl1, 108,142),
wx.DLG_SZE(self.pnl1, 100,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
wx.StaticText(self.pnl1, -1, _("Priorita`:"),
wx.DLG_PNT(self.pnl1, 213,144))
self.PRIO = wx.ComboBox(self.pnl1, Nid,"5",
wx.DLG_PNT(self.pnl1, 245,142),
wx.DLG_SZE(self.pnl1, 20,-1),[],
wx.CB_DROPDOWN | wx.CB_SORT )
self.vPRIO = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 275,90))
wx.StaticText(self.pnl1, -1, _("Pagamento :"),
wx.DLG_PNT(self.pnl1, 10,160))
self.PAGAM = wx.ComboBox(self.pnl1, Nid,"",
wx.DLG_PNT(self.pnl1, 60,158),
wx.DLG_SZE(self.pnl1, 120,-1),[],
wx.CB_DROPDOWN | wx.CB_SORT | wx.TE_PROCESS_ENTER)
self.vPAGAM = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 275,90))
self.cmpiliberi = wx.Button(self.pnl1, Nid, _("Campi Liberi"),
wx.DLG_PNT(self.pnl1,205,158),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
self.localit = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 280,37))
self.stato = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 280,37))
self.localit1 = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 280,37))
self.stato1 = wx.TextCtrl(self.pnl1, -1, "",
wx.DLG_PNT(self.pnl1, 280,37))
self.conse = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl3, 280,37))
self.trasp = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl3, 280,37))
self.cod_vet = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl3, 280,37))
self.rag_ord = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl3, 280,37))
self.campo1 = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl3, 280,37))
self.campo2 = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl3, 280,37))
self.lc = wx.ListCtrl(self.pnl2, Nid,
wx.DLG_PNT(self.pnl2, 5,10),
wx.DLG_SZE(self.pnl2, 323,95),
wx.LC_REPORT | wx.LC_HRULES | wx.LC_VRULES)
#wx.StaticLine(self.pnl, -1, wx.DLG_PNT(self.pnl, 5,155),
# wx.DLG_SZE(self.pnl, 283,-1))
self.lcod = wx.StaticText(self.pnl2, -1, _("Codice :"),
wx.DLG_PNT(self.pnl2, 5,112))
self.codart = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 30,110),
wx.DLG_SZE(self.pnl2, 55,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.ccodart = wx.BitmapButton(self.pnl2, -1, png,#wx.Button(self.pnl2, Nid, "...",
wx.DLG_PNT(self.pnl2, 86,110),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH, cfg.btnSzeV),
wx.TE_PROCESS_ENTER)
#self.ccodart.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL))
self.codbar = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 35,110),
wx.DLG_SZE(self.pnl2, 60,cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.ccodbar = buttons.GenToggleButton(self.pnl2, Nid, "|''|'|",
wx.DLG_PNT(self.pnl2, 99,110),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
#self.ccodbar.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.NORMAL))
        # begin custom text handling
self.ctesto = buttons.GenButton(self.pnl2, Nid, "T",
wx.DLG_PNT(self.pnl2, 112,110),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.dtesto = wx.TextCtrl(self.pnl2, -1, "", wx.DLG_PNT(self.pnl2,5,5))
        # end custom text handling
self.ccodinfo = buttons.GenButton(self.pnl2, Nid, " ? ",
wx.DLG_PNT(self.pnl2, 125,110),
wx.DLG_SZE(self.pnl2,cfg.btnSzeSH,cfg.btnSzeV))
wx.StaticText(self.pnl2, -1, _("Descrizione :"),
wx.DLG_PNT(self.pnl2, 140,112))
self.descriz = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 180, 110),
wx.DLG_SZE(self.pnl2, 130,cfg.DIMFONTDEFAULT), wx.TE_PROCESS_ENTER)
self.cdescriz = wx.BitmapButton(self.pnl2, -1, png,#wx.Button(self.pnl2, Nid, "...",
wx.DLG_PNT(self.pnl2, 315,110),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
wx.StaticText(self.pnl2, -1, _("UM :"),
wx.DLG_PNT(self.pnl2, 5,127))
self.UM = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 23,125),
wx.DLG_SZE(self.pnl2, 20, cfg.DIMFONTDEFAULT),wx.TE_PROCESS_ENTER)
self.lmis = wx.StaticText(self.pnl2, -1, _("Mis :"),
wx.DLG_PNT(self.pnl2, 50,127))
self.mis = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 70,125),
wx.DLG_SZE(self.pnl2, 20,cfg.DIMFONTDEFAULT))
wx.StaticText(self.pnl2, -1, _("Sc % :"),
wx.DLG_PNT(self.pnl2, 95,127))
self.sc1 = wx.TextCtrl(self.pnl2, Nid, "0,00",
wx.DLG_PNT(self.pnl2, 120,125),
wx.DLG_SZE(self.pnl2, 25, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.lvALIVA = wx.StaticText(self.pnl2, -1, _("Cod. Iva :"),
wx.DLG_PNT(self.pnl2, 155,127))
self.vALIVA = wx.TextCtrl(self.pnl2, Nid, "20",
wx.DLG_PNT(self.pnl2, 190,125),
wx.DLG_SZE(self.pnl2, 20, cfg.DIMFONTDEFAULT),
wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER )
self.cALIVA = wx.BitmapButton(self.pnl2, -1, png,#wx.Button(self.pnl2, Nid, "...",
wx.DLG_PNT(self.pnl2, 215,125),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.dALIVA = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 230,125),
wx.DLG_SZE(self.pnl2, 95, cfg.DIMFONTDEFAULT))
self.lvPDC = wx.StaticText(self.pnl2, -1, _("Cod. p.d.c. :"),
wx.DLG_PNT(self.pnl2, 235,127))
self.vPDC = wx.TextCtrl(self.pnl2, Nid, "7501",
wx.DLG_PNT(self.pnl2, 280,125),
wx.DLG_SZE(self.pnl2, 30, cfg.DIMFONTDEFAULT),
wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER )
self.cvPDC = wx.BitmapButton(self.pnl2, -1, png,#wx.Button(self.pnl2, Nid, "...",
wx.DLG_PNT(self.pnl2, 315,125),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
#self.lpeso = wx.StaticText(self.pnl2, -1, "Peso :",
# wx.DLG_PNT(self.pnl2, 230,127))
#self.peso = wx.TextCtrl(self.pnl2, Nid, "",
# wx.DLG_PNT(self.pnl2, 255,125),
# wx.DLG_SZE(self.pnl2, 20,-1), wx.ALIGN_RIGHT)
#self.lvolume = wx.StaticText(self.pnl2, -1, "volume :",
# wx.DLG_PNT(self.pnl2, 280,127))
#self.volume = wx.TextCtrl(self.pnl2, Nid, "",
# wx.DLG_PNT(self.pnl2, 300,125),
# wx.DLG_SZE(self.pnl2, 20,-1), wx.ALIGN_RIGHT)
self.llst = wx.StaticText(self.pnl2, -1, _("Listino :"),
wx.DLG_PNT(self.pnl2, 5,142))
self.lst = wx.TextCtrl(self.pnl2, Nid, "",
wx.DLG_PNT(self.pnl2, 33,140),
wx.DLG_SZE(self.pnl2, 20, cfg.DIMFONTDEFAULT),
wx.ALIGN_RIGHT )
self.clst = wx.BitmapButton(self.pnl2, -1, png,#wx.Button(self.pnl2, Nid, "...",
wx.DLG_PNT(self.pnl2, 57,140),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
        # latest change
wx.StaticText(self.pnl2, -1, _("Colli :"), wx.DLG_PNT(self.pnl2, 82,142))
self.colli = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 100,140),
wx.DLG_SZE(self.pnl2, 25, cfg.DIMFONTDEFAULT),
wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER )
self.lprovv = wx.StaticText(self.pnl2, -1, _("Provv. :"),
wx.DLG_PNT(self.pnl2, 73,142))
self.provv = wx.TextCtrl(self.pnl2, Nid, "0,00",
wx.DLG_PNT(self.pnl2, 100,140),
wx.DLG_SZE(self.pnl2, 25, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.lcosto = wx.StaticText(self.pnl2, -1, _("Costo :"),
wx.DLG_PNT(self.pnl2, 132,142))
self.costo = wx.TextCtrl(self.pnl2, Nid, "0,00",
wx.DLG_PNT(self.pnl2, 157,140),
wx.DLG_SZE(self.pnl2, 40, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.lprezzo = wx.StaticText(self.pnl2, Nid, _("Prezzo :"),
wx.DLG_PNT(self.pnl2, 202,142))
self.prezzo = wx.TextCtrl(self.pnl2, Nid, "0,00",
wx.DLG_PNT(self.pnl2, 232,140),
wx.DLG_SZE(self.pnl2, 45, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER )
wx.StaticText(self.pnl2, -1, _("Qt :"),
wx.DLG_PNT(self.pnl2, 283,142))
self.qt = wx.TextCtrl(self.pnl2, Nid, "0,00",
wx.DLG_PNT(self.pnl2, 297,140),
wx.DLG_SZE(self.pnl2, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT | wx.TE_PROCESS_ENTER )
#self.qt = wx.SpinCtrl(self.pnl2, -1, "0.00",
# wx.DLG_PNT(self.pnl2, 295,140),
# wx.DLG_SZE(self.pnl2, 30,-1))
#self.qt.SetRange(0.00,9999.99)
self.importo = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 275,37))
self.ltotale = wx.StaticText(self.pnl2, -1, _("Totale :"),
wx.DLG_PNT(self.pnl2, 235,162))
#self.ltotale.SetFont(self.font)
#self.ltotale.SetForegroundColour(wx.Colour(128, 128, 128))
self.totale = wx.TextCtrl(self.pnl2, Nid, "0,00",
wx.DLG_PNT(self.pnl2, 262,160),
wx.DLG_SZE(self.pnl2, 65, cfg.DIMFONTDEFAULT),
wx.ALIGN_RIGHT )
#self.totale.SetFont(self.font)
self.sc2 = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 275,37))
self.sc3 = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 275,37))
self.prezzo2 = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 275,37))
self.prezzo1 = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 275,37))
self.nriga = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 275,37))
self.vinprod = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 5,37))
self.vUM = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 5,37))
self.vMERCE = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 5,52))
self.vIMBAL = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,127))
self.vCONFE = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.codmerc = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.qt_ord = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.qt_con = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.qt_eva = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.prezzo_ag = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.datacons = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.peso = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.stt_ord2 = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.annodoc = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.tipodoc = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.datadoc = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.numdoc = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.campo2_art = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.campo1_art = wx.TextCtrl(self.pnl2, -1, "",
wx.DLG_PNT(self.pnl2, 285,137))
self.sbox_calce = wx.StaticBox(self.pnl3, Nid, " ",
wx.DLG_PNT(self.pnl3, 5,10),
wx.DLG_SZE(self.pnl3, 320,160))
self.lnote_calce = wx.StaticText(self.pnl3, -1, _("Note :"),
wx.DLG_PNT(self.pnl3, 10,27))
self.note_calce = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 45,25),
wx.DLG_SZE(self.pnl3, 260,55), style = wx.TE_MULTILINE)
self.lASPET = wx.StaticText(self.pnl3, Nid, _("Aspetto :"),
wx.DLG_PNT(self.pnl3, 15,87))
self.vASPET = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 55,85),
wx.DLG_SZE(self.pnl3, 20, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.cASPET = wx.BitmapButton(self.pnl3, -1, png,#wx.Button(self.pnl3, Nid, "...",
wx.DLG_PNT(self.pnl3, 80,85),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.dASPET = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 95,85),
wx.DLG_SZE(self.pnl3, 80, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.ltot_colli = wx.StaticText(self.pnl3, Nid, _("Num. totale colli :"),
wx.DLG_PNT(self.pnl3, 15,102))
self.tot_colli = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 75,100),
wx.DLG_SZE(self.pnl3, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.ltot_peso = wx.StaticText(self.pnl3, Nid, _("Peso colli :"),
wx.DLG_PNT(self.pnl3, 125,102))
self.tot_peso = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 165,100),
wx.DLG_SZE(self.pnl3, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.lscf = wx.StaticText(self.pnl3, Nid, _("Sconti finali :"),
wx.DLG_PNT(self.pnl3, 15,117))
self.scf1 = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 75,115),
wx.DLG_SZE(self.pnl3, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.scf2 = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 110,115),
wx.DLG_SZE(self.pnl3, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.scf3 = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 145,115),
wx.DLG_SZE(self.pnl3, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.lvPDC_SC = wx.StaticText(self.pnl3, -1, _("Cod. p.d.c. :"),
wx.DLG_PNT(self.pnl3, 205,117))
self.vPDC_SC = wx.TextCtrl(self.pnl3, Nid, "6105",
wx.DLG_PNT(self.pnl3, 250,115),
wx.DLG_SZE(self.pnl3, 30, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.cvPDC_SC = wx.BitmapButton(self.pnl3, -1, png,#wx.Button(self.pnl3, Nid, "...",
wx.DLG_PNT(self.pnl3, 285,115),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.lprezzo_ac = wx.StaticText(self.pnl3, Nid, _("Acconto :"),
wx.DLG_PNT(self.pnl3, 15,132))
self.prezzo_ac = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 55,130),
wx.DLG_SZE(self.pnl3, 50, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.lprezzo_ac1 = wx.StaticText(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 120,132))
self.prezzo_ac1 = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 210, 130),
wx.DLG_SZE(self.pnl3, 50, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.csaldo = wx.BitmapButton(self.pnl3, -1, png,#wx.Button(self.pnl3, Nid, "...",
wx.DLG_PNT(self.pnl3, 265,130),
wx.DLG_SZE(self.pnl,cfg.btnSzeSH,cfg.btnSzeV))
self.ltotaledoc = wx.StaticText(self.pnl3, Nid, _("Totale Ordine :"),
wx.DLG_PNT(self.pnl3, 180,152))
self.totaledoc = wx.TextCtrl(self.pnl3, Nid, "",
wx.DLG_PNT(self.pnl3, 240, 150),
wx.DLG_SZE(self.pnl3, 60, cfg.DIMFONTDEFAULT), wx.ALIGN_RIGHT )
self.vCONSEG = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl2, 285,117))
self.campo1_calce = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl2, 285,117))
self.campo2_calce = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl2, 285,117))
self.fndvTIPO_ORD = wx.TextCtrl(self.pnl3, -1, "",
wx.DLG_PNT(self.pnl2, 285,117))
self.ok = wx.Button(self.pnl1, Nid, cfg.vcok,
wx.DLG_PNT(self.pnl, 275,30),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.new = wx.Button(self.pnl1, Nid, cfg.vcnew,
wx.DLG_PNT(self.pnl, 275,30),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.oktestata = wx.Button(self.pnl1, Nid, cfg.vcconf,
wx.DLG_PNT(self.pnl, 275,30),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.inte = wx.Button(self.pnl1, Nid, cfg.vcint,
wx.DLG_PNT(self.pnl, 275,45),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.canc = wx.Button(self.pnl1, Nid, cfg.vccanc,
wx.DLG_PNT(self.pnl, 275,45),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.modi = wx.Button(self.pnl1, Nid, cfg.vcmodi,
wx.DLG_PNT(self.pnl, 275,60),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.dele = wx.Button(self.pnl1, Nid, cfg.vcdele,
wx.DLG_PNT(self.pnl, 275,60),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.stampa = wx.Button(self.pnl1, Nid, cfg.vcstampa,
wx.DLG_PNT(self.pnl, 275,75),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.skanag = wx.Button(self.pnl1, Nid, cfg.vcanag,
wx.DLG_PNT(self.pnl, 275,90),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV) )
self.newr = wx.Button(self.pnl2, Nid, cfg.vcnewr,
wx.DLG_PNT(self.pnl2, 5,160),
wx.DLG_SZE(self.pnl,cfg.btnSzeL1H,cfg.btnSzeV))
self.okart = wx.Button(self.pnl2, Nid, cfg.vcokr,
wx.DLG_PNT(self.pnl2, 67,160),
wx.DLG_SZE(self.pnl,cfg.btnSzeL1H,cfg.btnSzeV))
self.modir = wx.Button(self.pnl2, Nid, cfg.vcmodir,
wx.DLG_PNT(self.pnl2, 67,160),
wx.DLG_SZE(self.pnl,cfg.btnSzeL1H,cfg.btnSzeV))
self.intr = wx.Button(self.pnl2, Nid, cfg.vcintr,
wx.DLG_PNT(self.pnl2, 130,160),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
self.delr = wx.Button(self.pnl2, Nid, cfg.vcdeler,
wx.DLG_PNT(self.pnl2, 182,160),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
self.rbCONFER = wx.RadioButton(self.pnl1, Nid, cfg.vcCONF,
wx.DLG_PNT(self.pnl1, 275,110),
wx.DLG_SZE(self.pnl1, 55,10))
self.rbEVASO = wx.RadioButton(self.pnl1, Nid, _(" Evaso "),
wx.DLG_PNT(self.pnl1, 275,120),
wx.DLG_SZE(self.pnl1, 55,10))
self.rbPREVIS = wx.RadioButton(self.pnl1, Nid, cfg.vcPREV,
wx.DLG_PNT(self.pnl1, 275,130),
wx.DLG_SZE(self.pnl1, 55,10))
for x in self.pnl.GetChildren(): x.SetFont(self.font)
for x in self.pnl1.GetChildren(): x.SetFont(self.font)
for x in self.pnl2.GetChildren(): x.SetFont(self.font)
for x in self.pnl3.GetChildren(): x.SetFont(self.font)
box = wx.GridSizer(1, 1)
box.Add(self.pnl, 0, wx.ALIGN_CENTER|wx.ALL,10)
self.SetSizer(box)
box.Fit(self)
self.ccodinfo.Bind(wx.EVT_BUTTON, self.FndCodInfo)
self.skanag.Bind(wx.EVT_BUTTON, self.StpSkAnag)
#self.pnl.Bind(wx.EVT_BUTTON, self.Addi)
self.delr.Bind(wx.EVT_BUTTON, self.DelRow)
self.dele.Bind(wx.EVT_BUTTON, self.CntrDele)
self.oktestata.Bind(wx.EVT_BUTTON, self.OkTestata)
self.canc.Bind(wx.EVT_BUTTON, self.Close)
self.okart.Bind(wx.EVT_BUTTON, self.OkRow)
self.modi.Bind(wx.EVT_BUTTON, self.Modi)
self.modir.Bind(wx.EVT_BUTTON, self.ModiRow)
self.inte.Bind(wx.EVT_BUTTON, self.IntTestata)
self.intr.Bind(wx.EVT_BUTTON, self.IntRow)
self.newr.Bind(wx.EVT_BUTTON, self.NewRow)
self.new.Bind(wx.EVT_BUTTON, self.New)
self.ok.Bind(wx.EVT_BUTTON, self.Save)
self.stampa.Bind(wx.EVT_BUTTON, self.Stampa)
self.csaldo.Bind(wx.EVT_BUTTON, self.CalcSaldo)
self.lc.Bind(wx.EVT_LEFT_DCLICK, self.ModiRow)
        self.lc.Bind(wx.EVT_LIST_ITEM_SELECTED, self.LstSlct)  # note: must be bound on self.lc
        self.lc.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.LstSlct)  # note: must be bound on self.lc
        self.lc.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.LstAct)  # note: must be bound on self.lc
        #self.pnl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.LstSlct)  # old binding on self.pnl, superseded
        #self.pnl.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.LstSlct)  # old binding on self.pnl, superseded
        #self.pnl.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.LstAct)  # old binding on self.pnl, superseded
#self.pnl.Bind(wx.EVT_LIST_KEY_DOWN, self.DblClick)
self.qt.Bind(wx.EVT_TEXT_ENTER, self.OkRow)
self.sc1.Bind(wx.EVT_TEXT_ENTER, self.OkRow)
self.prezzo.Bind(wx.EVT_TEXT_ENTER, self.OkRow)
self.ccodage.Bind(wx.EVT_BUTTON, self.FndAge)
self.cragsoc1.Bind(wx.EVT_BUTTON, self.FndAnag)
self.cragsoc3.Bind(wx.EVT_BUTTON, self.FndAnagDest)
self.ccodart.Bind(wx.EVT_BUTTON, self.FndCodArt)
        self.ctesto.Bind(wx.EVT_BUTTON, self.OpenTesto)  # custom text editor
self.codart.Bind(wx.EVT_TEXT_ENTER, self.FndCodArt)
self.descriz.Bind(wx.EVT_TEXT_ENTER, self.FndDesArt)
self.codage.Bind(wx.EVT_TEXT_ENTER, self.FndAge)
#self.codcf.Bind(wx.EVT_TEXT_ENTER, self.FndCodCF)
self.ragsoc1.Bind(wx.EVT_TEXT_ENTER, self.FndAnag)
self.ragsoc3.Bind(wx.EVT_TEXT_ENTER, self.FndAnagDest)
self.ragsoc1.Bind(wx.EVT_CHAR, self.EvtChar)
#self.note.Bind(wx.EVT_CHAR, self.EvtChar)
self.cnum_ord.Bind(wx.EVT_BUTTON, self.FndOrd)
self.num_ord.Bind(wx.EVT_TEXT_ENTER, self.FndOrd)
self.data_ord.Bind(wx.EVT_TEXT_ENTER, self.CntData)
self.vs_data.Bind(wx.EVT_TEXT_ENTER, self.CntvsData)
self.descriz.Bind(wx.EVT_KILL_FOCUS, self.KillFcs_des)
#self.dataord.Bind(wx.EVT_KILL_FOCUS, self.KillFcs_dataord)
#self.vsdata.Bind(wx.EVT_KILL_FOCUS, self.KillFcs_vsdata)
self.cvsdata.Bind(wx.EVT_BUTTON, self.CntvsData)
self.cdataord.Bind(wx.EVT_BUTTON, self.CntData)
self.vs_ord.Bind(wx.EVT_CHAR, self.EvtChar)
self.num_ord.Bind(wx.EVT_CHAR, self.EvtChar)
self.newr.Bind(wx.EVT_CHAR, self.EvtCharS)
self.cALIVA.Bind(wx.EVT_BUTTON, self.FndSelALIVA)
self.vALIVA.Bind(wx.EVT_TEXT_ENTER, self.FndSelALIVA)
        self.codbar.Bind(wx.EVT_TEXT_ENTER, self.FndCodBar)
        self.ccodbar.Bind(wx.EVT_BUTTON, self.SelCodBar)
self.vs_ord.Bind(wx.EVT_TEXT_ENTER, self.KillFcs_vs_ord)
#self.vsdata.Bind(wx.EVT_TEXT_ENTER, self.KillFcs_vsdata)
self.vsrif.Bind(wx.EVT_TEXT_ENTER, self.KillFcs_vsrif)
self.nsrif.Bind(wx.EVT_TEXT_ENTER, self.KillFcs_nsrif)
self.PAGAM.Bind(wx.EVT_TEXT_ENTER, self.KillFcs_PAGAM)
self.note.Bind(wx.EVT_TEXT_ENTER, self.KillFcs_note)
self.rbCONFER.Bind(wx.EVT_RADIOBUTTON, self.RadioB)
self.rbPREVIS.Bind(wx.EVT_RADIOBUTTON, self.RadioB)
#self.rbPREVIS.Bind(wx.EVT_RADIOBUTTON, self.RadioB)
#self.prezzo.Bind(wx.EVT_KILL_FOCUS, self.KillFcs_prezzo)
#self.sc1.Bind(wx.EVT_KILL_FOCUS, self.KillFcs_sc1)
#self.qt.Bind(wx.EVT_KILL_FOCUS, self.KillFcs_qt)
self.PAGAM.Bind(wx.EVT_COMBOBOX, self.SelPAGAM)
self.TIPO_ORD.Bind(wx.EVT_COMBOBOX, self.SelTIPO_ORD)
#cdataord.Bind(wx.EVT_CALENDAR_DAY,self.CalSel)
self.cdest.Bind(wx.EVT_BUTTON, self.CDest)
self.rdest1.Bind(wx.EVT_BUTTON, self.RDest)
self.Bind(wx.EVT_CLOSE, self.Close)
self.Bind(wx.EVT_CHAR, self.EvtCharS)
self.InsLibriaz()
self.InsTabGen()
self.Start(self)
def InsLibriaz(self):
sql = """ select registro from libriaz where registro="PC" """
try:
            cr = self.CnAz.cursor()
            cr.execute(sql)
            row = cr.fetchone()
            if row is None:
                print "libriaz: 'PC' register missing"
import ins_libriaz
self.AggMenu(True,self.IDMENU )
wx.GetApp().GetPhasisMdi().CloseTabObj(self)
except StandardError, msg:
print "qui"
self.CnAz.commit()
def InsTabGen(self):
sql = """ select valore from tabgen where valore="PC" """
try:
            cr = self.CnAz.cursor()
            cr.execute(sql)
            row = cr.fetchone()
            if row is None:
                print "tabgen: 'PC' value missing"
import ins_tabgen
self.AggMenu(True,self.IDMENU )
wx.GetApp().GetPhasisMdi().CloseTabObj(self)
except StandardError, msg:
print "qui"
self.CnAz.commit()
def Start(self, evt):
self.stampa.Enable(False)
        self.dtesto.Show(False)  # custom text field
self.fndvTIPO_ORD.SetValue('')
self.ccodinfo.Enable(False)
self.vTIPO_ORD.SetValue(self.tipoord)
        self.cdest.SetLabel(_('Destinatario'))
self.lst.SetValue("1")
self.vDIVvend.SetValue("EU")
self.vPRIO.SetValue("5")
self.trasp.SetValue("TRA1")
self.rag_ord.SetValue("A")
self.campo1.SetValue("")
self.campo2.SetValue("")
self.note.SetBackgroundColour(self.color)
self.note_calce.SetBackgroundColour(self.color)
self.DelAnagTxt(self)
self.DelArtTxt(self)
self.OffAnagTxt(self)
self.OffArtTxt(self)
self.data = self.datacon #strftime("%d/%m/%Y")
self.data_ord.SetValue(self.data)
self.vdata_ord.SetValue(self.data)
self.data_ord.Enable(True)
self.TIPO_ORD.Enable(False)
self.num_ord.Enable(True)
self.num_ord.SetFocus()
#self.stt_ord1.SetValue("C")
#self.stt_ord2.SetValue("C")
##self.stt_ord1.SetValue("P")
##self.stt_ord2.SetValue("P")
self.lc.ClearAll()
self.lc.InsertColumn(0, _("Codice"))
self.lc.InsertColumn(1, _("Descrizione"))
self.lc.InsertColumn(2, _("Q.ta`"))
self.lc.InsertColumn(3, _("Prezzo"))
self.lc.InsertColumn(4, _("Sc%"))
self.lc.InsertColumn(5, _("Importo"))
self.lc.InsertColumn(6, _("Iva"))
self.lc.InsertColumn(7, _("UM"))
self.lc.InsertColumn(8, _("Mis"))
self.lc.InsertColumn(9, _("n riga"))
self.lc.InsertColumn(10, "")
self.lc.InsertColumn(11, "")
self.lc.InsertColumn(12, "")
self.lc.InsertColumn(13, "")
self.lc.InsertColumn(14, "")
self.lc.InsertColumn(15, "")
self.lc.InsertColumn(16, "")
self.lc.InsertColumn(17, "")
self.lc.InsertColumn(18, "")
self.lc.InsertColumn(19, "")
self.lc.InsertColumn(20, "")
self.lc.InsertColumn(21, "")
self.lc.InsertColumn(22, "")
self.lc.InsertColumn(23, "")
self.lc.InsertColumn(24, "")
self.lc.InsertColumn(25, "")
self.lc.InsertColumn(26, "")
self.lc.InsertColumn(27, "")
self.lc.InsertColumn(28, "")
self.lc.InsertColumn(29, "")
self.lc.InsertColumn(30, "")
self.lc.SetColumnWidth(0, wx.DLG_SZE(self.pnl, 60,-1).width)
self.lc.SetColumnWidth(1, wx.DLG_SZE(self.pnl, 100,-1).width)
self.lc.SetColumnWidth(2, wx.DLG_SZE(self.pnl, 27,-1).width)
self.lc.SetColumnWidth(3, wx.DLG_SZE(self.pnl, 50,-1).width)
self.lc.SetColumnWidth(4, wx.DLG_SZE(self.pnl, 25,-1).width)
self.lc.SetColumnWidth(5, wx.DLG_SZE(self.pnl, 50,-1).width)
self.lc.SetColumnWidth(6, wx.DLG_SZE(self.pnl, 25,-1).width)
self.lc.SetColumnWidth(7, wx.DLG_SZE(self.pnl, 20,-1).width)
self.lc.SetColumnWidth(8, wx.DLG_SZE(self.pnl, 20,-1).width)
self.lc.SetColumnWidth(9, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(10, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(11, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(12, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(13, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(14, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(15, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(16, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(17, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(18, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(19, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(20, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(21, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(22, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(23, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(24, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(25, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(26, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(27, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(28, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(29, wx.DLG_SZE(self.pnl, 0,-1).width)
self.lc.SetColumnWidth(30, wx.DLG_SZE(self.pnl, 0,-1).width)
#self.lc.SetFont(self.font)
self.lc.SetBackgroundColour(self.color)
self.lc.Enable(False)
self.sbox_cf.SetLabel(_(' Cliente '))
self.tbl = "ord1"
#self.t_cpart.SetValue("C") # valore tipo contro partita
self.vPDC.SetValue("7501")
self.vPDC_SC.SetValue("6105")
self.UM.SetValue("PZ")
self.vALIVA.SetValue("20")
self.vIMBAL.SetValue("IMBVS")
self.vCONFE.SetValue("CFSF")
if (self.tcpart=="C"): self.sbox_cf.SetLabel(_(' Cliente '))
if (self.tcpart=="F"): self.sbox_cf.SetLabel(_(' Fornitore '))
if self.vPAGAM.GetValue()=="" :
self.vPAGAM.SetValue(cfg.tipopagam)
self.sPAGAM = ""
if self.vPAGAM.GetValue()=='PAG23':
self.lprezzo_ac1.SetLabel(_('Importo Finanziamento :'))
else:self.lprezzo_ac1.SetLabel(_(' Saldo alla consegna :'))
self.SelCOMBO(self)
self.cntr = ""
self.cntr_row = ""
self.row = 0
self.ShowFalse(self)
self.EnableFalse(self)
self.new.Enable(True)
self.indiriz.Show(True)
self.cap.Show(True)
self.zona.Show(True)
self.pr.Show(True)
self.codcf.Show(True)
self.ragsoc1.Show(True)
#self.ragsoc2.Show(True)
self.cragsoc1.Show(True)
self.indiriz1.Show(True)
if self.ccodbar.GetValue()==0:
self.codart.Show(True)
self.codbar.Show(False)
else:
self.codart.Show(False)
self.codbar.Show(True)
if (self.rec!=""):
self.num_ord.SetValue(self.rec)
self.FndOrd(self)
if self.tcpart=='F':
self.lcosto.Show(False)
self.costo.Show(False)
#self.lprovv.Show(False)
#self.provv.Show(False)
self.ntbk.SetFocus()
self.ntbk.SetSelection(0)
#self.rbEVASO.SetValue(False)
#self.rbEVASO.Enable(False)
#self.rbCONFER.SetValue(False)
#self.rbPREVIS.SetValue(True)
self.oldnum_ord = ""
self.oldvTIPO_ORD = ""
self.provv.Show(False)
self.lprovv.Show(False)
if self.tipoord=="OC":
self.sTIPO_ORD = 'OC'
self.stt_ord1.SetValue("C")
self.stt_ord2.SetValue("C")
if self.tipoord=="OF":
self.sTIPO_ORD = 'OF'
self.stt_ord1.SetValue("C")
self.stt_ord2.SetValue("C")
if self.tipoord=="PF":
self.sTIPO_ORD = 'PF'
self.stt_ord1.SetValue("P")
self.stt_ord2.SetValue("P")
if self.tipoord=="PC":
self.sTIPO_ORD = 'PC'
self.stt_ord1.SetValue("P")
self.stt_ord2.SetValue("P")
self.SelRadioB(self)
def EnableFalse(self, evt):
self.ccodinfo.Enable(False)
self.skanag.Enable(False)
self.csaldo.Enable(False)
self.cdest.Enable(False)
self.rdest1.Enable(False)
self.indiriz1.Enable(False)
self.zona1.Enable(False)
self.pr1.Enable(False)
self.cap1.Enable(False)
self.localit1.Enable(False)
self.stato1.Enable(False)
self.indiriz.Enable(False)
self.zona.Enable(False)
self.pr.Enable(False)
self.cap.Enable(False)
self.localit.Enable(False)
self.stato.Enable(False)
self.ragsoc3.Enable(False)
self.ragsoc4.Enable(False)
self.codcf1.Enable(False)
self.anno.Enable(False)
self.conse.Enable(False)
self.clst.Enable(False)
self.vDIVvend.Enable(False)
self.lst.Enable(False)
self.PRIO.Enable(False)
self.vPRIO.Enable(False)
self.trasp.Enable(False)
self.rag_ord.Enable(False)
self.codcf.Enable(False)
self.ccodcf.Enable(False)
self.ragsoc1.Enable(False)
self.cragsoc1.Enable(False)
self.ragsoc2.Enable(False)
self.vTIPO_ORD.Enable(False)
#self.note.Enable(False)
self.cmpiliberi.Enable(False)
self.campo1.Enable(False)
self.campo2.Enable(False)
self.ragsoc1age.Enable(False)
self.vsrif.Enable(False)
self.nsrif.Enable(False)
self.vs_ord.Enable(False)
self.vs_data.Enable(False)
self.importo.Enable(False)
self.rbCONFER.Enable(False)
self.rbPREVIS.Enable(False)
self.rbEVASO.Enable(False)
self.vdata_ord.Enable(False)
self.cnum_ord.Enable(True)
self.codmerc.Enable(False)
self.qt_ord.Enable(False)
self.qt_con.Enable(False)
self.qt_eva.Enable(False)
self.prezzo_ag.Enable(False)
self.datacons.Enable(False)
self.colli.Enable(False)
self.peso.Enable(False)
self.stt_ord1.Enable(False)
self.stt_ord2.Enable(False)
self.annodoc.Enable(False)
self.tipodoc.Enable(False)
self.datadoc.Enable(False)
self.numdoc.Enable(False)
self.campo2_art.Enable(False)
self.campo1_art.Enable(False)
self.totaledoc.Enable(False)
self.stampa.Enable(False)
#self.stampac.Enable(False)
self.modi.Enable(False)
self.dele.Enable(False)
self.newr.Enable(False)
self.okart.Enable(False)
self.modir.Enable(False)
self.intr.Enable(False)
self.delr.Enable(False)
self.ccodart.Enable(False)
self.ctesto.Enable(False) # personalizza testo
self.PAGAM.Enable(False)
self.vPAGAM.Enable(False)
self.codage.Enable(False)
self.ccodage.Enable(False)
#self.note.SetBackgroundColour(self.color)
self.note.Enable(False)
#self.note_calce.SetBackgroundColour(self.color)
self.note_calce.Enable(False)
self.scf1.Enable(False)
self.scf2.Enable(False)
self.scf3.Enable(False)
self.tot_colli.Enable(False)
self.tot_peso.Enable(False)
self.vASPET.Enable(False)
self.cASPET.Enable(False)
self.dASPET.Enable(False)
self.vCONSEG.Enable(False)
self.vPDC.Enable(False)
self.vPDC_SC.Enable(False)
self.cvPDC.Enable(False)
self.cvPDC_SC.Enable(False)
self.prezzo_ac.Enable(False)
self.prezzo_ac1.Enable(False)
self.campo2_calce.Enable(False)
self.campo1_calce.Enable(False)
def ShowFalse(self, evt):
self.ragsoc3.Show(False)
self.ragsoc4.Show(False)
self.cragsoc3.Show(False)
self.localit1.Show(False)
self.stato1.Show(False)
self.localit.Show(False)
self.stato.Show(False)
self.indiriz1.Show(False)
self.cap1.Show(False)
self.zona1.Show(False)
self.pr1.Show(False)
self.codcf1.Show(False)
self.ragsoc2.Show(False)
self.ragsoc4.Show(False)
#self.note.Show(False)
#self.ragsoc1age.Show(False)
#self.vdata_ord.Show(False)
self.vTIPO_ORD.Show(False)
self.fndvTIPO_ORD.Show(False)
self.vDIVvend.Show(False)
self.vPRIO.Show(False)
self.conse.Show(False)
self.trasp.Show(False)
self.cod_vet.Show(False)
self.rag_ord.Show(False)
self.campo1.Show(False)
self.campo2.Show(False)
self.vdata_ord.Show(False)
#self.sc1.Show(False)
self.sc2.Show(False)
self.sc3.Show(False)
self.nriga.Show(False)
#self.lvolume.Show(False)
#self.lpeso.Show(False)
#self.volume.Show(False)
#self.peso.Show(False)
#self.vPDC.Show(False)
self.dALIVA.Show(False)
self.vUM.Show(False)
self.vMERCE.Show(False)
self.vinprod.Show(False)
#self.vALIVA.Show(False)
self.vIMBAL.Show(False)
self.vCONFE.Show(False)
self.prezzo1.Show(False)
self.prezzo2.Show(False)
self.importo.Show(False)
self.stt_ord1.Show(False)
self.stt_ord2.Show(False)
self.codmerc.Show(False)
self.qt_ord.Show(False)
self.qt_con.Show(False)
self.qt_eva.Show(False)
self.prezzo_ag.Show(False)
self.datacons.Show(False)
#self.colli.Show(False)
self.peso.Show(False)
self.annodoc.Show(False)
self.tipodoc.Show(False)
self.datadoc.Show(False)
self.numdoc.Show(False)
self.campo2_art.Show(False)
self.campo1_art.Show(False)
self.ok.Show(False)
self.canc.Show(True)
self.oktestata.Show(False)
self.new.Show(True)
self.modi.Show(True)
self.dele.Show(False)
self.vPAGAM.Show(False)
self.vCONSEG.Show(False)
self.campo2_calce.Show(False)
self.campo1_calce.Show(False)
self.inte.Show(False)
def CDest(self, evt):
        if self.cdest.GetLabel() == _('Destinatario'):
self.cdest.SetLabel(_(' Cliente '))
self.sbox_cf.SetLabel(_(' Destinatario '))
self.lragsoc.SetLabel(_("Destinazione merce :"))
self.codcf.Show(False)
self.codcf1.Show(True)
self.ragsoc1.Show(False)
self.cragsoc1.Show(False)
self.indiriz1.Show(True)
self.ragsoc3.Show(True)
self.ragsoc3.Enable(True)
self.cragsoc3.Show(True)
self.cap1.Show(True)
self.zona1.Show(True)
self.pr1.Show(True)
self.indiriz.Show(False)
self.cap.Show(False)
self.zona.Show(False)
self.pr.Show(False)
self.rdest1.Enable(True)
self.ragsoc3.SetFocus()
else:
self.cdest.SetLabel(_('Destinatario'))
self.sbox_cf.SetLabel(_(' Cliente '))
self.lragsoc.SetLabel(_("Cessionario :"))
self.codcf.Show(True)
self.codcf1.Show(False)
self.ragsoc1.Show(True)
self.cragsoc1.Show(True)
self.indiriz1.Show(True)
self.ragsoc3.Show(False)
self.ragsoc3.Enable(False)
self.cragsoc3.Show(False)
self.indiriz.Show(True)
self.cap.Show(True)
self.zona.Show(True)
self.pr.Show(True)
self.indiriz1.Show(False)
self.cap1.Show(False)
self.zona1.Show(False)
self.pr1.Show(False)
#self.localit1.Show(False)
#self.stato1.Show(False)
self.rdest1.Enable(False)
self.ragsoc1.SetFocus()
def RDest(self, evt):
self.ragsoc3.SetValue('')
self.ragsoc4.SetValue('')
self.indiriz1.SetValue('')
self.zona1.SetValue('')
self.localit1.SetValue('')
self.cap1.SetValue('')
self.pr1.SetValue('')
self.stato1.SetValue('')
self.codcf1.SetValue('')
self.ragsoc3.SetFocus()
def SelCodBar(self, evt):
if self.ccodbar.GetValue()==0 :
self.ccodbar.SetToggle(False)
self.codart.Show(True)
self.codbar.Show(False)
self.lcod.SetLabel(_("Codice :"))
self.codart.SetFocus()
self.codbar.SetValue('')
else:
self.ccodbar.SetToggle(True)
self.codart.Show(False)
self.codbar.Show(True)
self.lcod.SetLabel(_("BarCod:"))
self.codbar.SetFocus()
self.codart.SetValue('')
def SelRadioB(self, evt):
if (self.stt_ord1.GetValue()=="P"):
self.stt_ord1.SetValue("P")
self.stt_ord2.SetValue("P")
self.rbPREVIS.SetValue(True)
self.rbCONFER.SetValue(False)
self.rbEVASO.SetValue(False)
elif (self.stt_ord1.GetValue()=="C"):
self.stt_ord2.SetValue("C")
self.rbPREVIS.SetValue(False)
self.rbCONFER.SetValue(True)
self.rbEVASO.SetValue(False)
elif (self.stt_ord1.GetValue()=="E"):
self.stt_ord2.SetValue("E")
self.rbPREVIS.SetValue(False)
self.rbCONFER.SetValue(False)
self.rbEVASO.SetValue(True)
#elif (self.stt_ord1.GetValue()=="G"):
# self.stt_ord2.SetValue("G")
# self.rbPREVIS.SetValue(False)
# self.rbCONFER.SetValue(False)
# self.rbEVASO.SetValue(True)
def RadioB(self, evt):
if self.rbPREVIS.GetValue()==True:
self.stt_ord1.SetValue("P")
self.stt_ord2.SetValue("P")
if self.rbEVASO.GetValue()==True:
self.stt_ord1.SetValue("E")
self.stt_ord2.SetValue("E")
elif self.rbCONFER.GetValue()==True:
self.stt_ord1.SetValue("C")
self.stt_ord2.SetValue("C")
        self.New(self)  # modification
def IntTestata(self, evt):
if(self.voktestata==1):
dlg = wx.MessageDialog(self, cfg.msgint, self.ttl,
wx.YES_NO | wx.NO_DEFAULT |wx.ICON_QUESTION)
if dlg.ShowModal()==wx.ID_YES:
self.rec = ""
self.Start(self)
self.cdest.SetLabel(_(' Cliente '))
self.CDest(self)
else:
dlg.Destroy()
else:
self.rec = ""
self.stt_ord1.SetValue("")
self.stt_ord2.SetValue("")
self.Start(self)
self.cdest.SetLabel(_(' Cliente '))
self.CDest(self)
def NewTxt(self, evt):
self.OnAnagTxt(self)
self.TIPO_ORD.Enable(False)
self.num_ord.Enable(False)
self.cnum_ord.Enable(False)
self.data_ord.Enable(True)
self.data_ord.SetFocus()
self.new.Show(False)
self.ok.Show(False)
self.oktestata.Show(True)
self.canc.Show(False)
self.inte.Show(True)
self.modi.Enable(False)
def ModiTxt(self, evt):
self.OnAnagTxt(self)
self.cntr = "modi"
self.TIPO_ORD.Enable(False)
self.num_ord.Enable(False)
self.cnum_ord.Enable(False)
self.data_ord.Enable(True)
self.data_ord.SetFocus()
self.new.Show(False)
self.ok.Show(False)
self.oktestata.Show(True)
self.canc.Show(False)
self.inte.Show(True)
self.modi.Enable(False)
self.modi.Show(False)
self.dele.Show(True)
self.dele.Enable(True)
def KillFcs_colli(self, evt):
self.qt.SetFocus()
def KillFcs_vs_ord(self, evt):
self.vs_data.SetFocus()
#def KillFcs_vsdata(self, evt):
# self.ragsoc1.SetFocus()
def KillFcs_note(self, evt):
self.codage.SetFocus()
def KillFcs_vsrif(self, evt):
self.nsrif.SetFocus()
def KillFcs_nsrif(self, evt):
self.note.SetFocus()
def KillFcs_PAGAM(self, evt):
self.oktestata.SetFocus()
def CntvsData(self, evt):
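        # Validate the customer's order date (gg/mm/aaaa) before moving focus on.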
if (self.cntr=="new" or self.cntr=="modi"):
cnt_gma = 0
vsdata = self.vs_data.GetValue().strip()
if vsdata !="":
gma = vsdata.split('/')
try:
if (gma[0].isdigit()!=True):
self.Message(cfg.msgdatano ,self.ttl)
elif (gma[1].isdigit()!=True):
self.Message(cfg.msgdatano ,self.ttl)
elif (gma[2].isdigit()!=True):
self.Message(cfg.msgdatano ,self.ttl)
except:
self.Message(cfg.msgdatano ,self.ttl)
if len(gma)==3:
gg = int(gma[0])
mm = int(gma[1])
aa = int(gma[2])
                    if gg > 0 and gg <= 31:
                        cnt_gma+=1
                    if mm >= 1 and mm <= 12:
cnt_gma+=1
if aa<=int(self.annoc) :
cnt_gma+=1
if cnt_gma!=3: self.Message(cfg.msgdatano ,self.ttl)
if cnt_gma==3: self.ragsoc1.SetFocus()
else:self.ragsoc1.SetFocus()
def CntData(self, evt):
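        # Validate the document date and check it against the last registered date.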
if (self.cntr=="new" or self.cntr=="modi"):
data_ord = self.data_ord.GetValue().strip()
cnt_gma = 0
gma = data_ord.split('/')
try:
if (gma[0].isdigit()!=True):
self.Message(cfg.msgdatano ,self.ttl)
elif (gma[1].isdigit()!=True):
self.Message(cfg.msgdatano ,self.ttl)
elif (gma[2].isdigit()!=True):
self.Message(cfg.msgdatano ,self.ttl)
except:
self.Message(cfg.msgdatano ,self.ttl)
if len(gma)==3:
gg = int(gma[0])
mm = int(gma[1])
aa = int(gma[2])
if gg > 0 and gg<=31:
cnt_gma+=1
                if mm >= 1 and mm <= 12:
cnt_gma+=1
if aa==int(self.annoc):
cnt_gma+=1
vdata_ord = self.vdata_ord.GetValue()
vgma = vdata_ord.split('/')
vgg = int(vgma[0])
vmm = int(vgma[1])
vaa = int(vgma[2])
vdata = int(vgma[2] + vgma[1] + vgma[0])
data = int(gma[2] + gma[1] + gma[0])
self.vs_ord.SetFocus()
if data < vdata :
dlg = wx.MessageDialog(self,cfg.msgdatault ,
self.ttl, wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if dlg.ShowModal()==wx.ID_YES:
self.vdata_ord.SetValue(self.data_ord.GetValue())
self.data_ord.Enable(False)
self.num_ord.Enable(False)
dlg.Destroy()
else:
self.data_ord.SetFocus()
dlg.Destroy()
else:
self.vdata_ord.SetValue(self.data_ord.GetValue())
self.data_ord.Enable(False)
self.num_ord.Enable(False)
            if cnt_gma==2 and aa != int(self.annoc):
self.Message(cfg.msgdataes + self.annoc,self.ttl)
elif cnt_gma!=3 : self.Message(cfg.msgdatano ,self.ttl)
def EvtChar(self, evt):
evt_char = evt.GetKeyCode()
if (evt_char==27 and self.cntr==""):self.canc.SetFocus()
if (evt_char==27 and self.cntr!=""):self.inte.SetFocus()
evt.Skip()
def EvtCharS(self, evt):
evt_char = evt.GetKeyCode()
if evt_char==49:
self.ntbk.SetSelection(0)
if (self.cntr=="new" or self.cntr=="modi") : self.ok.SetFocus()
if evt_char==50:
self.ntbk.SetSelection(1)
self.ntbk.SetFocus()
if evt_char==51: self.ntbk.SetSelection(2)
evt.Skip()
def OffAnagTxt(self, evt):
self.num_ord.Enable(False)
self.cnum_ord.Enable(False)
self.data_ord.Enable(False)
self.vs_ord.Enable(False)
self.vs_data.Enable(False)
self.rbCONFER.Enable(False)
self.rbPREVIS.Enable(False)
def OnAnagTxt(self ,evt):
self.codage.Enable(True)
self.ccodage.Enable(True)
self.nsrif.Enable(True)
self.vsrif.Enable(True)
self.vs_ord.Enable(True)
self.vs_data.Enable(True)
self.ragsoc1.Enable(True)
self.cragsoc1.Enable(True)
self.note.SetBackgroundColour(wx.Colour(255, 255, 255))
self.note.Enable(True)
self.PAGAM.Enable(True)
self.rbCONFER.Enable(True)
self.rbPREVIS.Enable(True)
self.cdest.Enable(True)
def DelAnagTxt(self, evt):
self.num_ord.SetValue('')
self.vdata_ord.SetValue('')
self.data_ord.SetValue('')
self.vs_ord.SetValue('')
self.vs_data.SetValue('')
self.codcf.SetValue('')
self.codcf1.SetValue('')
self.codage.SetValue('')
self.ragsoc1.SetValue('')
self.ragsoc2.SetValue('')
self.indiriz.SetValue('')
self.zona.SetValue('')
self.localit.SetValue('')
self.cap.SetValue('')
self.pr.SetValue('')
self.stato.SetValue('')
self.ragsoc3.SetValue('')
self.ragsoc4.SetValue('')
self.indiriz1.SetValue('')
self.zona1.SetValue('')
self.localit1.SetValue('')
self.cap1.SetValue('')
self.pr1.SetValue('')
self.stato1.SetValue('')
self.note.SetValue('')
self.note_calce.SetValue('')
self.sc2.SetValue('0,00')
self.scf1.SetValue('0,00')
self.scf2.SetValue('0,00')
self.scf3.SetValue('0,00')
vPAGAM = self.vPAGAM.GetValue()
if vPAGAM=="" :
self.vPAGAM.SetValue(cfg.tipopagam)
self.sPAGAM = ""
self.vPDC.SetValue('7501')
self.vPDC_SC.SetValue('6105')
self.vALIVA.SetValue("20")
self.dALIVA.SetValue(_("Aliquota 20%"))
#self.reg_def.SetValue('N')
self.vsrif.SetValue('')
self.nsrif.SetValue('')
self.ragsoc1age.SetValue('')
self.prezzo_ac.SetValue('0,00')
self.prezzo_ac1.SetValue('0,00')
self.prezzo_ag.SetValue('0,00')
self.totale.SetValue('0,00')
self.provv.SetValue('0,00')
self.colli.SetValue('0,00')
self.peso.SetValue('0,00')
self.tot_colli.SetValue('0,00')
self.tot_peso.SetValue('0,00')
self.totaledoc.SetValue('0,00')
def SelCOMBO(self, evt):
vPAGAM = self.vPAGAM.GetValue()
self.PAGAM.Clear()
vTIPO_ORD = self.vTIPO_ORD.GetValue()
self.TIPO_ORD.Clear()
sql = """ select * from tabgen """
try:
cr = self.CnAz.cursor ()
cr.execute(sql)
while (1):
row = cr.fetchone ()
if row==None:
break
if (row[0]=="PAGAM"):
if (row[1]==vPAGAM):self.sPAGAM = row[2]
self.PAGAM.Append(row[2],row[1])
if (row[0]=="TIPOORD"):
if (row[1]==vTIPO_ORD):self.sTIPO_ORD = row[2]
self.TIPO_ORD.Append(row[2],row[1])
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," SelCOMBO tabgen Error %s" % (msg))
self.CnAz.commit()
cntPAGAM = 0
cntPAGAM = self.PAGAM.FindString(self.sPAGAM)
self.PAGAM.Select(cntPAGAM)
cntTIPO_ORD = 0
cntTIPO_ORD = self.TIPO_ORD.FindString(self.sTIPO_ORD)
self.TIPO_ORD.Select(cntTIPO_ORD)
def SelPAGAM(self, evt):
self.Sel(evt)
self.vPAGAM.SetValue(self.cb_val)
if self.vPAGAM.GetValue()=='PAG23':
self.lprezzo_ac1.SetLabel(_('Importo Finanziamento :'))
else:self.lprezzo_ac1.SetLabel(_(' Saldo alla consegna :'))
def SelTIPO_ORD(self, evt):
self.Sel(evt)
self.vTIPO_ORD.SetValue(self.cb_val)
def Sel(self, evt):
cb = evt.GetEventObject()
self.cb_val = cb.GetClientData(cb.GetSelection())
self.cb_str = evt.GetString()
evt.Skip()
#self.SetClientSize(wx.Size(680, 425))
def SetFcs(self, evt):
evt.Skip()
#def KillFcs_qt(self, evt):
# vqt = self.qt.GetValue().replace(",","")
# if (vqt.isdigit()!=True):
# self.Message(cfg.msgqtno,self.ttl)
# self.qt.SetFocus()
# #evt.Skip()
def KillFcs_des(self, evt):
if self.codart.GetValue()=='':
self.descriz.SetValue(self.descriz.GetValue().upper())
#def KillFcs_prezzo(self, evt):
# vprezzo = self.prezzo.GetValue().replace(",","")
# if (vprezzo.isdigit()!=True):
# self.Message(cfg.msgprezno,self.ttl)
# self.prezzo.SetFocus()
#def KillFcs_sc1(self, evt):
# vsc1 = self.sc1.GetValue().replace(",","")
# if (vsc1.isdigit()!=True):
# self.Message(cfg.msgscno,self.ttl)
# self.sc1.SetFocus()
def getColTxt(self, index, col):
item = self.lc.GetItem(index, col)
return item.GetText()
def LstSlct(self, evt):
self.currentItem = evt.m_itemIndex
self.codart.SetValue(self.lc.GetItemText(self.currentItem))
self.descriz.SetValue(self.getColTxt(self.currentItem, 1))
self.qt.SetValue(self.getColTxt(self.currentItem, 2))
self.prezzo.SetValue(self.getColTxt(self.currentItem, 3))
self.sc1.SetValue(self.getColTxt(self.currentItem, 4))
self.importo.SetValue(self.getColTxt(self.currentItem, 5))
self.vALIVA.SetValue(self.getColTxt(self.currentItem, 6))
self.UM.SetValue(self.getColTxt(self.currentItem, 7))
self.mis.SetValue(self.getColTxt(self.currentItem, 8))
self.nriga.SetValue(self.getColTxt(self.currentItem, 9))
self.codbar.SetValue(self.getColTxt(self.currentItem, 10))
self.codmerc.SetValue(self.getColTxt(self.currentItem, 11))
self.qt_ord.SetValue(self.getColTxt(self.currentItem, 12))
self.qt_con.SetValue(self.getColTxt(self.currentItem, 13))
self.qt_eva.SetValue(self.getColTxt(self.currentItem, 14))
self.prezzo_ag.SetValue(self.getColTxt(self.currentItem, 15))
self.sc2.SetValue(self.getColTxt(self.currentItem, 16))
self.sc3.SetValue(self.getColTxt(self.currentItem, 17))
self.provv.SetValue(self.getColTxt(self.currentItem, 18))
self.datacons.SetValue(self.getColTxt(self.currentItem, 19))
self.colli.SetValue(self.getColTxt(self.currentItem, 20))
self.peso.SetValue(self.getColTxt(self.currentItem, 21))
self.lst.SetValue(self.getColTxt(self.currentItem, 22))
self.vPDC.SetValue(self.getColTxt(self.currentItem, 23))
self.stt_ord2.SetValue(self.getColTxt(self.currentItem, 24))
self.annodoc.SetValue(self.getColTxt(self.currentItem, 25))
self.tipodoc.SetValue(self.getColTxt(self.currentItem, 26))
self.datadoc.SetValue(self.getColTxt(self.currentItem, 27))
self.numdoc.SetValue(self.getColTxt(self.currentItem, 28))
self.campo1_art.SetValue(self.getColTxt(self.currentItem, 29))
self.campo2_art.SetValue(self.getColTxt(self.currentItem, 30))
self.row = self.currentItem
self.SelRow(self)
def LstAct(self, evt):
self.SelRow(self)
self.currentItem = evt.m_itemIndex
def FndSelAnag(self, evt):
row = evt
self.codcf.SetValue(str(row[1]))
self.ragsoc1.SetValue(str(row[3]).title())
self.ragsoc2.SetValue(str(row[4]).title())
self.__MDI__.CnvVM(row[30])
if(self.__MDI__.val==""):self.__MDI__.val = "0"
self.sc1.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(row[31])
self.sc2.SetValue(self.__MDI__.val)
self.indiriz.SetValue(str(row[6]).title())
cap = string.zfill(str(row[7]).strip(),5)
self.cap.SetValue(cap)
self.zona.SetValue(str(row[8]).title())
self.localit.SetValue(str(row[9]))
self.pr.SetValue(str(row[10]).strip().upper())
self.stato.SetValue(str(row[11]).strip().upper())
if (self.cntr!="new"):
self.indiriz1.SetValue(str(row[12]).title())
cap1 = string.zfill(str(row[7]).strip(),5)
self.cap1.SetValue(cap1)
self.zona1.SetValue(str(row[14]).title())
self.localit1.SetValue(str(row[15]))
self.pr1.SetValue(str(row[16]).strip().upper())
self.stato1.SetValue(str(row[17]).strip().upper())
self.sTIPO_ORD = self.tipoord
self.vPAGAM.SetValue(str(row[41]))
if self.vPAGAM.GetValue()=="":
self.vPAGAM.SetValue(cfg.tipopagam)
self.sPAGAM = ""
#self.note.SetValue(row[29])
#self.reg.SetValue(row[30])
self.SelCOMBO(self)
self.OffAnagTxt(self)
self.rbCONFER.Enable(False)
self.rbPREVIS.Enable(False)
if (self.cntr=="modi"):
self.skanag.Enable(True)
self.inte.Show(True)
self.dele.Show(False)
self.dele.Enable(False)
self.modi.Show(True)
self.modi.Enable(True)
self.modi.SetFocus()
if (self.cntr=="new"):
self.codage.SetValue(str(row[38]))
if self.codage.GetValue()!="": self.FndAge(self)
self.canc.Show(False)
self.inte.Show(True)
self.new.Show(False)
self.oktestata.SetFocus()
self.dele.Show(False)
self.dele.Enable(False)
self.modi.Show(True)
self.modi.Enable(False)
self.vsrif.SetFocus()
def FndALIVA(self, evt):
val = self.vALIVA.GetValue()
cod = "ALIVA"
cnt_rec = 0
sql = """ select * from tabgen where cod = "%s" and valore like "%s" """
valueSql = cod, '%' + val + '%'
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndALIVA Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==1 and cnt_rec<2):
self.dALIVA.SetValue(row[2])
self.qt.SetFocus()
elif (cnt_rec>1):
try:
import srctabg
except :
pass
try:
import base.srctabg
except :
pass
control = ['Ricerca Cod. IVA',self.vALIVA,self.dALIVA,self.FndSelALIVA,'ALIVA']
win = srctabg.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
def FndSelALIVA(self, evt):
val = self.vALIVA.GetValue()
cod = "ALIVA"
cnt_rec = 0
sql = """ select * from tabgen where cod = "%s" and valore = "%s" """
valueSql = cod, val
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndSelALIVA Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==1 and cnt_rec<2):
self.dALIVA.SetValue(row[2])
self.qt.SetFocus()
else:self.FndALIVA(self)
def FndSelOrd(self, evt):
if self.cntr=="new" :
self.EnableFalse(self)
self.oktestata.Show(False)
self.new.Show(True)
self.stampa.Enable(True)
row = evt
self.vTIPO_ORD.SetValue(str(row[0]))
self.anno.SetValue(str(row[1]))
self.num_ord.SetValue(str(row[2]))
self.vdata_ord.SetValue(str(row[3]))
self.data_ord.SetValue(str(row[3]))
self.codcf.SetValue(str(row[4]))
self.ragsoc1.SetValue(str(row[5]))
self.ragsoc2.SetValue(str(row[6]))
self.indiriz.SetValue(str(row[7]))
cap = string.zfill(str(row[8]).strip(),5)
self.cap.SetValue(cap)
self.zona.SetValue(str(row[9]))
self.localit.SetValue(str(row[10]))
self.pr.SetValue(str(row[11]))
self.stato.SetValue(str(row[12]))
self.__MDI__.CnvNone(row[13])
self.codcf1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[14])
self.ragsoc3.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[15])
self.ragsoc4.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[16])
self.indiriz1.SetValue(self.__MDI__.val)
if row[17]=='' or row[17]==None:
self.cap1.SetValue('')
else:
cap1 = string.zfill(str(row[17]).strip(),5)
self.cap1.SetValue(cap1)
self.__MDI__.CnvNone(row[18])
self.zona1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[19])
self.localit1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[20])
self.pr1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[21])
self.stato1.SetValue(self.__MDI__.val)
self.stt_ord1.SetValue(str(row[22]))
self.lst.SetValue(str(row[23]))
self.vs_ord.SetValue(str(row[24]))
self.vs_data.SetValue(str(row[25]))
self.vDIVvend.SetValue(str(row[26]))
codage = str(row[27])
self.codage.SetValue(codage)
if (codage!=""): self.FndAge(self)
self.vPRIO.SetValue(str(row[28]))
self.vPAGAM.SetValue(str(row[29]))
self.vCONSEG.SetValue(str(row[30]))
self.trasp.SetValue(str(row[31]))
self.cod_vet.SetValue(str(row[32]))
self.vsrif.SetValue(str(row[33]))
self.nsrif.SetValue(str(row[34]))
self.rag_ord.SetValue(str(row[35]))
self.campo1.SetValue(str(row[36]))
self.campo2.SetValue(str(row[37]))
self.note.SetValue(str(row[38]))
self.vASPET.SetValue(str(row[39]))
        #self.tot_colli.SetValue(str(row[40]))
        #self.tot_peso.SetValue(str(row[41]))
        #self.scf1.SetValue(str(row[42]))
        #self.scf2.SetValue(str(row[43]))
        #self.scf3.SetValue(str(row[44]))
        self.__MDI__.CnvVM(row[40])
        self.tot_colli.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(row[41])
self.tot_peso.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(row[42])
self.scf1.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(row[43])
self.scf2.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(row[44])
self.scf3.SetValue(self.__MDI__.val)
self.vPDC_SC.SetValue(str(row[45]))
self.prezzo_ac.SetValue(str(row[46]))
self.prezzo_ac1.SetValue(str(row[47]))
self.campo1_calce.SetValue(str(row[48]))
self.campo2_calce.SetValue(str(row[49]))
self.note_calce.SetValue(str(row[50]))
self.SelCOMBO(self)
self.FndOrdCorpo(self)
self.TIPO_ORD.Enable(False)
self.num_ord.Enable(False)
self.data_ord.Enable(False)
self.new.Enable(False)
self.canc.Show(False)
self.inte.Show(True)
self.SelRadioB(self)
if self.stt_ord1.GetValue()=="P" or self.stt_ord1.GetValue()=="C":
self.skanag.Enable(True)
self.modi.Enable(True)
self.modi.SetFocus()
else:
self.Message(cfg.msgordicons,self.ttl)
self.inte.SetFocus()
def FndOrd(self, evt):
fndvTIPO_ORD = self.fndvTIPO_ORD.GetValue()
vnumord = self.num_ord.GetValue()
#if num_ord=="" :
#self.Message(cfg.msgass + " --> " + self.tbl,self.ttl)
if vnumord=='' : vnumord = 0
#else:
cnt_rec = 0
anno = self.anno.GetValue()
vTIPO_ORD = self.vTIPO_ORD.GetValue()
if fndvTIPO_ORD!="" and self.rec=="":
sql = """ select * from ordi1
where num_ord = "%s" and tipo_ord = "%s"
and anno = "%s" """
valueSql = int(vnumord), fndvTIPO_ORD, anno
elif self.rec!="":
sql = """ select * from ordi1
where num_ord = "%s" and tipo_ord = "%s"
and anno = "%s" """
valueSql = int(vnumord), vTIPO_ORD, anno
elif int(vnumord) != 0 :
sql = """ select * from ordi1
where tipo_ord = "%s" and num_ord = "%s" and anno = "%s" """
valueSql = vTIPO_ORD, int(vnumord), anno
else :
sql = """ select * from ordi1
where tipo_ord = "%s" and anno = "%s" """
valueSql = vTIPO_ORD, anno
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
####if rows==[] and vnumord!=0:
#### self.Message(cfg.msgass + self.tbl,self.ttl)
cnt_rec=len(rows)
####for row in rows:
#### cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndOrd Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0): self.Message(cfg.msgass +" --> ",self.ttl)
elif (cnt_rec==1): self.FndSelOrd(rows[0])
#elif (cnt_rec>1):
else :
import srcord
stt_ord = ''
self.fndvTIPO_ORD.SetValue('')
control = [self.vTIPO_ORD, self.anno, self.num_ord,
self.vdata_ord, self.FndOrd, stt_ord, self.fndvTIPO_ORD]
win = srcord.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
####elif(self.cntr=="new"): self.data_ord.SetFocus()
#elif (cnt_rec==0 ):
#self.Message(cfg.msgass + " --> " + self.tbl,self.ttl)
def FndOrdCorpo(self, evt):
rowlc = 0
cnt_rec = 0
num_ord = self.num_ord.GetValue()
anno = self.anno.GetValue()
vTIPO_ORD = self.vTIPO_ORD.GetValue()
sql = """ select * from ordi2 where num_ord = "%s"
and tipo_ord = "%s" and anno = "%s"
order by num_rig desc """
valueSql = int(num_ord), vTIPO_ORD, anno
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
for rowlc in range(1):
row_lc = self.lc.GetItemCount()
self.__MDI__.CnvVM(row[11])
qt_ord = self.__MDI__.val
self.__MDI__.CnvVM(row[12])
qt_con = self.__MDI__.val
self.__MDI__.CnvVM(row[13])
qt_eva = self.__MDI__.val
self.__MDI__.CnvVM5(row[14])
prezzo = self.__MDI__.val
self.__MDI__.CnvVM(row[18])
sc1 = self.__MDI__.val
if (sc1==""):sc1 = "0,0"
self.__MDI__.CnvVM(row[19])
sc2 = self.__MDI__.val
self.__MDI__.CnvVM(row[20])
sc3 = self.__MDI__.val
self.__MDI__.CnvVM(row[16])
tot_riga = self.__MDI__.val
self.lc.InsertStringItem(rowlc, row[5])
self.lc.SetStringItem(rowlc, 1, row[8])
self.lc.SetStringItem(rowlc, 2, qt_ord)
self.lc.SetStringItem(rowlc, 3, prezzo)
self.lc.SetStringItem(rowlc, 4, sc1)
self.lc.SetStringItem(rowlc, 5, tot_riga)
self.lc.SetStringItem(rowlc, 6, row[17])
self.lc.SetStringItem(rowlc, 7, row[9])
self.lc.SetStringItem(rowlc, 8, row[10])
self.lc.SetStringItem(rowlc, 9, str(row[4]))
self.lc.SetStringItem(rowlc, 10, str(row[6]))
self.lc.SetStringItem(rowlc, 11, row[7])
self.lc.SetStringItem(rowlc, 12, qt_ord)
self.lc.SetStringItem(rowlc, 13, qt_con)
self.lc.SetStringItem(rowlc, 14, qt_eva)
self.lc.SetStringItem(rowlc, 15, str(row[15]))
self.lc.SetStringItem(rowlc, 16, sc2)
self.lc.SetStringItem(rowlc, 17, sc3)
self.lc.SetStringItem(rowlc, 18, str(row[21]))
self.lc.SetStringItem(rowlc, 19, str(row[22]))
self.lc.SetStringItem(rowlc, 20, str(row[23]))
self.lc.SetStringItem(rowlc, 21, str(row[24]))
self.lc.SetStringItem(rowlc, 22, str(row[25]))
self.lc.SetStringItem(rowlc, 23, str(row[26]))
self.lc.SetStringItem(rowlc, 24, str(row[27]))
self.lc.SetStringItem(rowlc, 25, str(row[28]))
self.lc.SetStringItem(rowlc, 26, str(row[29]))
self.lc.SetStringItem(rowlc, 27, str(row[30]))
self.lc.SetStringItem(rowlc, 28, str(row[31]))
self.lc.SetStringItem(rowlc, 29, str(row[32]))
self.lc.SetStringItem(rowlc, 30, str(row[33]))
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndOrdCorpo Error %s" % (msg))
self.CnAz.commit()
self.CalcTotale(self)
def Modi(self, evt):
dlg = wx.MessageDialog(self, cfg.msgmodi_doc, self.ttl,
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if dlg.ShowModal()==wx.ID_YES:
self.ModiTxt(self)
#self.ntbk.SetFocus()
dlg.Destroy()
else:
self.cntr = ""
dlg.Destroy()
def CntrDele(self, evt):
dlg = wx.MessageDialog(self, cfg.msgnodele_doc, self.ttl,
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_EXCLAMATION)
if dlg.ShowModal()==wx.ID_YES:
self.Dele(self)
else:
self.cntr = ""
dlg.Destroy()
def Dele(self, evt):
dlg = wx.MessageDialog(self, cfg.msgdele_doc,self.ttl,
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if dlg.ShowModal()==wx.ID_YES:
self.cntr = ""
vTIPO_ORD = self.vTIPO_ORD.GetValue()
vanno = self.anno.GetValue()
vnum_ord = self.num_ord.GetValue()
valueSql = vTIPO_ORD, vanno, int(vnum_ord)
try:
cr = self.CnAz.cursor()
sql = """ delete * from ordi1
where tipo_ord = "%s" and anno = "%s"
and num_ord = "%s" """
cr.execute(sql % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Dele ordi1 Error %s" % (msg))
self.CnAz.commit()
try:
cr = self.CnAz.cursor()
sql = """ delete * from ordi2
where tipo_ord = "%s" and anno = "%s"
and num_ord = "%s" """
cr.execute(sql % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Dele ordi2 Error %s" % (msg))
self.CnAz.commit()
self.Start(self)
dlg.Destroy()
else:
self.cntr = ""
dlg.Destroy()
def OkTestata(self, evt):
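        # Confirm the header: lock the header widgets and enable the rows list.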
if (self.codcf.GetValue()==""):
self.Message(cfg.msgnocod,self.ttl)
self.cdest.SetLabel(_(' Cliente '))
self.CDest(self)
self.ragsoc1.SetFocus()
else:
self.voktestata = 1
self.codage.Enable(False)
self.ccodage.Enable(False)
self.ragsoc1.Enable(False)
self.cragsoc1.Enable(False)
self.ragsoc3.Enable(False)
self.cragsoc3.Enable(False)
self.cdest.Enable(False)
self.nsrif.Enable(False)
self.vsrif.Enable(False)
#self.abi.Enable(False)
#self.cab.Enable(False)
#self.banca.Enable(False)
#self.note.SetBackgroundColour(self.color)
self.note.Enable(False)
#self.note_calce.SetBackgroundColour(self.color)
self.note_calce.Enable(False)
self.PAGAM.Enable(False)
self.lc.SetBackgroundColour(wx.Colour(255, 255, 255))
self.lc.Enable(True)
self.prezzo_ac.Enable(True)
self.prezzo_ac1.Enable(True)
self.csaldo.Enable(True)
#self.note_calce.Enable(True)
#if (self.cntr=="modi"):
#self.LoadOrd(self)
        #print self.cntr
if (self.cntr=="new" or self.cntr=="modi"):
self.OffAnagTxt(self)
self.oktestata.Show(False)
self.ok.Show(True)
self.ok.Enable(True)
self.new.Show(False)
self.ntbk.SetSelection(1)
self.newr.Enable(True)
self.newr.SetFocus()
def FndAge(self, evt):
cnt = 0
sql = """ select max(cod) from agenti """
try:
cr = self.CnAz.cursor ()
cr.execute(sql)
cnt = cr.fetchone()
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndAge max Error %s" % (msg))
self.CnAz.commit()
cnt_rec = 0
tcpart_age = "A"
self.ragsoc1age.SetValue("")
cod = self.codage.GetValue()
val = self.codage.GetValue()
#val = self.ragsoc1age.GetValue()
if cod=="" : cod = 0
elif (cod.isdigit()!=True): cod = 0
sql = """ select cod, rag_soc1, provv from agenti
where cod = "%s" or rag_soc1 like "%s" """
valueSql = int(cod), '%' + val.title() + '%'
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndAge Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0 and cnt[0]>1): cnt_rec=cnt[0]
if (cnt_rec==0 ):
self.PAGAM.SetFocus()
self.codage.SetValue("")
self.ragsoc1age.SetValue("")
elif (cnt_rec==1 and cnt_rec<2): self.FndSelAge(row)
elif (cnt_rec>1):
try:
import srcanag
except :
pass
try:
import base.srcanag
except :
pass
control = ['A',self.codage,self.ragsoc1age,self.FndAge]
win = srcanag.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
def FndSelAge(self, evt):
row = evt
self.codage.SetValue(str(row[0]))
self.ragsoc1age.SetValue(str(row[1]).title())
self.__MDI__.CnvVM(row[2])
self.provv.SetValue(self.__MDI__.val)
self.PAGAM.SetFocus()
def FndCodCF(self, evt):
cnt_rec = 0
val = self.ragsoc1.GetValue().upper()
cod = self.codcf.GetValue()
sql = """ select * from anag where cod = "%s" and t_cpart = "%s" """
valueSql = int(cod), self.tcpart
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndCodCF Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatono,self.ttlanag)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelAnag(row)
def FndAnag(self, evt):
cnt_rec = 0
val = self.ragsoc1.GetValue().title()
cod = self.codcf.GetValue()
sql = """ select * from anag where rag_soc1 like "%s"
and t_cpart = "%s" """
valueSql = '%' + val + '%', self.tcpart
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndAnag Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatonull,self.ttlanag)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelAnag(row)
elif (cnt_rec>1):
try:
import srcanag
except :
pass
try:
import base.srcanag
except :
pass
control = [self.tcpart,self.codcf,self.ragsoc1,self.FndCodCF]
win = srcanag.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
else:
self.nsrif.SetFocus()
def FndSelAnag(self, evt):
row = evt
self.codcf.SetValue(str(row[1]))
self.ragsoc1.SetValue(str(row[3]).title())
self.ragsoc2.SetValue(str(row[4]).title())
self.__MDI__.CnvVM(row[30])
if(self.__MDI__.val==""):self.__MDI__.val = "0"
self.sc1.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(row[31])
self.sc2.SetValue(self.__MDI__.val)
if row[12]=='':
cap = string.zfill(str(row[7]).strip(),5)#'%05s' % row[7]
if cap=="00000" : cap = ""
self.indiriz.SetValue(str(row[6]).title())
self.zona.SetValue(str(row[8]).title())
self.localit.SetValue(str(row[9]))
self.cap.SetValue(cap)
self.pr.SetValue(str(row[10]).strip().upper())
else:
cap = string.zfill(str(row[13]).strip(),5)#'%05s' % row[13]
if cap=="00000" : cap = ""
self.indiriz.SetValue(str(row[12]).title())
self.zona.SetValue(str(row[14]).title())
self.localit.SetValue(str(row[15]))
self.cap.SetValue(cap)
self.pr.SetValue(str(row[16]).strip().upper())
self.sTIPO_ORD = self.tipoord
self.vPAGAM.SetValue(str(row[42]))
if self.vPAGAM.GetValue()=="":
self.vPAGAM.SetValue(cfg.tipopagam)
self.sPAGAM = ""
#self.note.SetValue(row[29])
#self.reg.SetValue(row[30])
self.SelCOMBO(self)
self.OffAnagTxt(self)
self.rbCONFER.Enable(False)
self.rbPREVIS.Enable(False)
if (self.cntr=="modi"):
self.skanag.Enable(True)
self.inte.Show(True)
self.dele.Show(False)
self.dele.Enable(False)
self.modi.Show(True)
self.modi.Enable(True)
self.modi.SetFocus()
if (self.cntr=="new"):
self.codage.SetValue(str(row[38]))
if self.codage.GetValue()!="": self.FndAge(self)
self.canc.Show(False)
self.inte.Show(True)
self.new.Show(False)
self.oktestata.SetFocus()
self.dele.Show(False)
self.dele.Enable(False)
self.modi.Show(True)
self.modi.Enable(False)
#self.ntbk.SetSelection(1)
self.vsrif.SetFocus()
def FndCodCFDest(self, evt):
cnt_rec = 0
val = self.ragsoc3.GetValue().upper()
cod = self.codcf1.GetValue()
sql = """ select * from tblcf where cod = "%s"
and t_cpart = "%s" """
valueSql = int(cod), self.tcpart
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndCodCFDest Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatono,self.ttldest)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelAnagDest(row)
def FndAnagDest(self, evt):
cnt_rec = 0
val = self.ragsoc3.GetValue().title()
cod = self.codcf1.GetValue()
sql = """ select * from tblcf where tag_soc1 like "%s"
and t_cpart = "%s" """
valueSql = '%' + val + '%', self.tcpart
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndAnagDest Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatonull,self.ttldest)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelAnagDest(row)
elif (cnt_rec>1):
try:
import srctblcf
except :
pass
try:
import base.srctblcf
except :
pass
control = [self.tcpart,self.codcf1,self.ragsoc3,self.FndCodCFDest,self.codcf]
win = srctblcf.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
else:
self.nsrif.SetFocus()
def FndSelAnagDest(self, evt):
row = evt
self.codcf1.SetValue(str(row[1]))
self.ragsoc3.SetValue(str(row[3]).title())
self.ragsoc4.SetValue(str(row[4]).title())
self.indiriz1.SetValue(str(row[6]).title())
cap1 = string.zfill(str(row[7]).strip(),5)
self.cap1.SetValue(cap1)
self.__MDI__.CnvNone(row[8])
self.zona1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[9])
self.localit1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[10])
self.pr1.SetValue(self.__MDI__.val)
self.__MDI__.CnvNone(row[11])
self.stato1.SetValue(self.__MDI__.val)
self.vsrif.SetFocus()
def OffArtTxt(self, evt):
self.codart.Enable(False)
self.codbar.Enable(False)
self.ccodart.Enable(False)
self.ccodbar.Enable(False)
self.ctesto.Enable(False) # personalizza testo
self.ccodinfo.Enable(False)
self.descriz.Enable(False)
self.cdescriz.Enable(False)
self.UM.Enable(False)
self.mis.Enable(False)
self.provv.Enable(False)
self.costo.Enable(False)
self.colli.Enable(False)
self.qt.Enable(False)
self.prezzo.Enable(False)
self.sc1.Enable(False)
self.vALIVA.Enable(False)
self.cALIVA.Enable(False)
#self.nriga.Enable(False)
self.peso.Enable(False)
self.totale.Enable(False)
self.vinprod.Enable(False)
self.UM.Enable(False)
self.vMERCE.Enable(False)
self.vIMBAL.Enable(False)
self.vCONFE.Enable(False)
self.modir.Enable(False)
self.okart.Enable(False)
self.modir.Show(True)
self.okart.Show(False)
self.intr.Enable(False)
self.delr.Enable(False)
self.newr.Enable(True)
self.newr.SetFocus()
def OffArtTxtAll(self, evt):
self.lc.SetBackgroundColour(wx.Colour(255, 255, 255))
self.lc.Enable(True)
self.codart.Enable(False)
self.ccodart.Enable(False)
self.codbar.Enable(False)
self.ccodbar.Enable(False)
self.ctesto.Enable(False) # personalizza testo
self.descriz.Enable(False)
self.cdescriz.Enable(False)
self.UM.Enable(False)
#self.mis.Enable(False)
#self.lcosto.Show(False)
#self.costo.Show(False)
self.costo.Enable(False)
#self.prezzo.Enable(False)
#self.sc1.Enable(False)
self.nriga.Enable(False)
#self.qt.Enable(False)
self.peso.Enable(False)
self.totale.Enable(False)
self.vinprod.Enable(False)
self.vMERCE.Enable(False)
self.vIMBAL.Enable(False)
self.vCONFE.Enable(False)
#self.lc.Enable(False)
def OnArtTxt(self, evt):
self.lc.SetBackgroundColour(self.color)
self.lc.Enable(False)
self.codart.Enable(True)
self.ccodart.Enable(True)
self.codbar.Enable(True)
#self.codbar.Show(True)
self.ccodbar.Enable(True)
self.ctesto.Enable(True) # personalizza testo
self.ccodinfo.Enable(True)
self.descriz.Enable(True)
self.cdescriz.Enable(True)
self.UM.Enable(True)
self.mis.Enable(True)
self.provv.Enable(True)
self.prezzo.Enable(True)
self.sc1.Enable(True)
self.colli.Enable(True)
self.qt.Enable(True)
self.vALIVA.Enable(True)
self.cALIVA.Enable(True)
def NewRow(self, evt):
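        # Start a new article row, focusing the code or barcode field.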
self.OnArtTxt(self)
self.DelArtTxt(self)
self.cntr_row = "new"
if self.ccodbar.GetValue()==False :
self.codart.Show(True)
self.codbar.Show(False)
#self.codart.SetFocus()
self.descriz.SetFocus()
else:
self.codart.Show(False)
self.codbar.Show(True)
self.codbar.SetFocus()
self.newr.Enable(False)
self.intr.Enable(True)
self.modir.Enable(False)
self.okart.Enable(True)
self.modir.Show(False)
self.okart.Show(True)
def ModiRow(self, evt):
self.OnArtTxt(self)
self.cntr_row = "modi"
self.qt.SetFocus()
self.delr.Enable(True)
self.cntr_row = ""
self.modir.Show(False)
self.modir.Enable(False)
self.okart.Show(True)
self.okart.Enable(True)
def DelArtTxt(self, evt):
self.codart.SetValue('')
self.codbar.SetValue('')
self.descriz.SetValue('')
self.UM.SetValue('')
self.prezzo.SetValue('')
self.costo.SetValue('')
self.importo.SetValue('')
self.qt.SetValue('')
#self.volume.SetValue('')
self.peso.SetValue('0,00')
self.colli.SetValue('0,00')
self.prezzo_ag.SetValue('0,00')
#self.vALIVA.SetValue('20')
#self.dALIVA.SetValue("Aliquota 20%")
def CalcSaldo(self,evt):
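        # Balance on delivery = document total minus the down payment.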
if self.prezzo_ac.GetValue()=='':self.prezzo_ac.SetValue('0')
if self.totaledoc.GetValue()=='':self.totaledoc.SetValue('0')
totordi = self.totaledoc.GetValue()
accordi = self.prezzo_ac.GetValue()
self.__MDI__.CnvPM(totordi)
totordi = self.__MDI__.val
self.__MDI__.CnvPM(accordi)
accordi = self.__MDI__.val
saldo = totordi-accordi
self.__MDI__.CnvVM(saldo)
self.prezzo_ac1.SetValue(self.__MDI__.val)
def CalcTotale(self,evt):
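        # Recompute taxable total, VAT and document total from the list rows.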
tot_colli = 0
importo = 0
imponibile = 0
iva = 0
for x in range(self.lc.GetItemCount()):
self.__MDI__.CnvPM(self.getColTxt(x, 20))
colli_row = float(self.__MDI__.val)
            tot_colli += colli_row  # accumulate the package count over all rows
self.__MDI__.CnvPM(self.getColTxt(x, 5))
imponibile_row = float(self.__MDI__.val)
self.__MDI__.CnvPM(self.getColTxt(x, 6))
iva_row = float(self.__MDI__.val)
if (type(iva_row)==float) :
iva+=imponibile_row*iva_row/100
imponibile+=imponibile_row
importo=imponibile+iva
self.__MDI__.CnvVM(imponibile)
self.totale.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(importo)
self.totaledoc.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM(tot_colli)
self.tot_colli.SetValue(self.__MDI__.val)
def SelRow(self,evt):
self.intr.Enable(True)
self.modir.Enable(True)
self.newr.Enable(False)
def IntRow(self,evt):
self.OffArtTxt(self)
self.DelArtTxt(self)
self.lc.SetBackgroundColour(wx.Colour(255, 255, 255))
self.lc.Enable(True)
def DelRow(self, evt):
dlg = wx.MessageDialog(self, cfg.msgdelrow, self.ttl,
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if dlg.ShowModal()==wx.ID_YES:
self.lc.DeleteItem(self.row)
self.IntRow(self)
self.CalcTotale(self)
self.newr.SetFocus()
dlg.Destroy()
#self.Close(true)
else:
dlg.Destroy()
def RmpRow(self, evt):
self.lc.InsertStringItem(self.row, self.codart.GetValue())
self.lc.SetStringItem(self.row, 1, self.descriz.GetValue())
self.lc.SetStringItem(self.row, 2, self.qt.GetValue())
self.lc.SetStringItem(self.row, 3, self.prezzo.GetValue())
self.lc.SetStringItem(self.row, 4, self.sc1.GetValue())
self.lc.SetStringItem(self.row, 5, self.importo.GetValue())
self.lc.SetStringItem(self.row, 6, self.vALIVA.GetValue())
self.lc.SetStringItem(self.row, 7, self.UM.GetValue())
self.lc.SetStringItem(self.row, 8, self.mis.GetValue())
self.lc.SetStringItem(self.row, 9, self.nriga.GetValue())
self.lc.SetStringItem(self.row, 10, self.codbar.GetValue())
self.lc.SetStringItem(self.row, 11, self.codmerc.GetValue())
self.lc.SetStringItem(self.row, 12, self.qt_ord.GetValue())
self.lc.SetStringItem(self.row, 13, self.qt_con.GetValue())
self.lc.SetStringItem(self.row, 14, self.qt_eva.GetValue())
self.lc.SetStringItem(self.row, 15, self.prezzo_ag.GetValue())
self.lc.SetStringItem(self.row, 16, self.sc2.GetValue())
self.lc.SetStringItem(self.row, 17, self.sc3.GetValue())
self.lc.SetStringItem(self.row, 18, self.provv.GetValue())
self.lc.SetStringItem(self.row, 19, self.datacons.GetValue())
self.lc.SetStringItem(self.row, 20, self.colli.GetValue())
self.lc.SetStringItem(self.row, 21, self.peso.GetValue())
self.lc.SetStringItem(self.row, 22, self.lst.GetValue())
self.lc.SetStringItem(self.row, 23, self.vPDC.GetValue())
self.lc.SetStringItem(self.row, 24, self.stt_ord2.GetValue())
self.lc.SetStringItem(self.row, 25, self.annodoc.GetValue())
self.lc.SetStringItem(self.row, 26, self.tipodoc.GetValue())
self.lc.SetStringItem(self.row, 27, self.datadoc.GetValue())
self.lc.SetStringItem(self.row, 28, self.numdoc.GetValue())
self.lc.SetStringItem(self.row, 29, self.campo1_art.GetValue())
self.lc.SetStringItem(self.row, 30, self.campo2_art.GetValue())
def OkRow(self, evt):
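        # Validate price, discount and quantity, then store the computed row.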
cnt_val = 0
valprezzo = self.prezzo.GetValue().replace(",","")
valprezzo = valprezzo.replace("-","")
if (valprezzo!="" and valprezzo.isdigit()==True):
self.__MDI__.CnvPM5(self.prezzo.GetValue())
vprezzo = self.__MDI__.val
cnt_val+=1
else:
self.Message(cfg.msgprezno,self.ttl)
self.prezzo.SetFocus()
sc1 = self.sc1.GetValue().replace(",","")
if (sc1!="" and sc1.isdigit()==True):
self.__MDI__.CnvPM(self.sc1.GetValue())
vsc1 = self.__MDI__.val
cnt_val+=1
else:
self.Message(cfg.msgscno,self.ttl)
self.sc1.SetFocus()
qt = self.qt.GetValue().replace(",","")
if (qt!="" and qt.isdigit()==True):
self.__MDI__.CnvPM(self.qt.GetValue())
vqt = self.__MDI__.val
cnt_val+=1
else:
self.Message(cfg.msgqtno,self.ttl)
self.qt.SetFocus()
if (cnt_val==3):
importo = (vprezzo*vqt)-(vprezzo*vqt*vsc1/100)
self.__MDI__.CnvVM(importo)
self.importo.SetValue(self.__MDI__.val)
self.OffArtTxt(self)
if ( self.cntr_row=="new"):
self.row = self.lc.GetItemCount()
nriga = self.row + 1
self.nriga.SetValue(str(nriga*10))
self.RmpRow(self)
if ( self.cntr_row==""):
self.RmpRow(self)
self.lc.DeleteItem(self.row + 1)
self.lc.SetItemState(self.row-1,
wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
self.CalcTotale(self)
self.newr.Enable(True)
self.newr.SetFocus()
self.modir.Enable(False)
self.okart.Enable(False)
self.cntr_row = ""
def FndSelArt(self, evt):
row = evt
self.codart.SetValue(str(row[0]))
self.descriz.SetValue(str(row[2]))
self.UM.SetValue(str(row[3]))
self.mis.SetValue(str(row[4]))
self.__MDI__.CnvVM5(row[7])
self.costo.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM5(row[5])
self.prezzo1.SetValue(self.__MDI__.val)
self.__MDI__.CnvVM5(row[6])
self.prezzo2.SetValue(self.__MDI__.val)
self.prezzo.SetValue(self.prezzo1.GetValue())
if self.tcpart=='F':
self.prezzo.SetValue(self.costo.GetValue())
self.lcosto.Show(False)
self.costo.Show(False)
#self.lprovv.Show(False)
#self.provv.Show(False)
self.codbar.SetValue(str(row[1]))
self.vALIVA.SetValue(str(row[11]))
self.vMERCE.SetValue(str(row[8]))
self.vIMBAL.SetValue(str(row[23]))
self.vCONFE.SetValue(str(row[24]))
self.peso.SetValue(str(row[25]))
#self.volume.SetValue(str(row[26]))
self.vinprod.SetValue(str(row[29]))
self.OffArtTxtAll(self)
self.vALIVA.SetFocus()
def FndCodInfo(self, evt):
cod = self.codart.GetValue().upper()
des = self.descriz.GetValue().upper()
try:
import infoart
except :
pass
try:
import base.infoart
except :
pass
#control = [cod,des]
#win = infoart.create(self,control)
if cod!="" and des!="" :
control = [cod,des]
win = infoart.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
else: self.Message(_("ATTENZIONE!!! campo di ricerca vuoto") ,self.ttl)
def FndCodArt(self, evt):
cnt_rec = 0
cod = self.codart.GetValue().upper()
sql = """ select * from articoli where cod like "%s" """
valueSql = '%' + cod + '%'
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndCodArt Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec>=1000): self.Message(cfg.msgfnd + str(cnt_rec) ,self.ttl)
elif (cnt_rec==0):self.Message(cfg.msgdatonull,self.ttl)
elif (cnt_rec==1 and cnt_rec<2):self.FndSelArt(row)
elif (cnt_rec>1):
try:
import srcart
except :
pass
try:
import base.srcart
except :
pass
control = [self.tblart,self.codart,self.descriz,self.FndArt]
win = srcart.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
def FndCodBar(self, evt):
cnt_rec = 0
cod = self.codbar.GetValue()
sql = """ select * from articoli where codbar = "%s" """
valueSql = cod
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndCodBar Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatono,self.ttl)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelArt(row)
def FndDesArt(self, evt):
cnt_rec = 0
val = self.descriz.GetValue().upper()
sql = """ select * from articoli
where descriz like "%s" order by descriz """
valueSql = '%' + val + '%'
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("Vendite"," FndDesArt Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatonull,self.ttl)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelArt(row)
elif (cnt_rec>1):
import base.srcart
control = [self.tblart,self.codart,self.descriz,self.FndArt]
win = base.srcart.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
def FndArt(self, evt):
cnt_rec = 0
val = self.descriz.GetValue().upper()
cod = self.codart.GetValue().upper()
sql = """ select * from articoli where cod = "%s" """
valueSql = cod
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
cnt_rec+=1
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," FndArt Error %s" % (msg))
self.CnAz.commit()
if (cnt_rec==0):self.Message(cfg.msgdatonull,self.ttl)
elif (cnt_rec==1 and cnt_rec<2): self.FndSelArt(row)
def FndArtSel(self, evt):
self.okart.Enable(True)
self.okart.SetFocus()
def New(self, evt):
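        # Start a new order, pulling the next number from the libriaz counters.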
if self.cntr!="modi" :#modifica
self.IntTestata(self)
self.NewTxt(self)
self.cntr = "new"
else:
self.cntr = "new"
self.oldnum_ord = int(self.num_ord.GetValue())
self.oldvTIPO_ORD = self.vTIPO_ORD.GetValue()
if self.rbCONFER.GetValue()==True: self.vTIPO_ORD.SetValue("OC")
self.SelCOMBO(self)
registro = self.vTIPO_ORD.GetValue()
anno = self.anno.GetValue()
chiave = "RORD"
sql = """ select * from libriaz
where chiave = "%s" and anno = "%s" and registro = "%s" """
valueSql = chiave, anno, registro
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
while (1):
row = cr.fetchone()
if row==None:
break
if (row[3]==None or row[3]=="") :
self.num_ord.SetValue('1')
else :
self.num_ord.SetValue(str(int(row[3]) + 1))
self.val_numord = int(self.num_ord.GetValue())
if (row[16]==None or row[16]=="") :
self.vdata_ord.SetValue(self.data)
else:
self.vdata_ord.SetValue(row[16])
self.data_ord.SetValue(self.data)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," New num_ord Error %s" % (msg))
self.CnAz.commit()
num_ord = int(self.num_ord.GetValue())
def Message(self, qst, ttl):
dlg = wx.MessageDialog(self, qst, ttl,
wx.OK | wx.ICON_EXCLAMATION)
if dlg.ShowModal()==wx.ID_OK:
dlg.Destroy()
def Close(self, evt):
if (self.ragsoc2.GetValue()!="" or self.ragsoc1.GetValue()!=""):
dlg = wx.MessageDialog(self, cfg.msgesci, self.ttl,
wx.YES_NO | wx.NO_DEFAULT |wx.ICON_QUESTION)
if dlg.ShowModal()==wx.ID_YES:
dlg.Destroy()
self.AggMenu(True,self.IDMENU)
wx.GetApp().GetPhasisMdi().CloseTabObj(self)
#self.Destroy()
else:
dlg.Destroy()
else:
self.AggMenu(True,self.IDMENU)
wx.GetApp().GetPhasisMdi().CloseTabObj(self)
#self.Destroy()
def Save(self, evt):
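        # Persist the header into ordi1 and every list row into ordi2,
        # then update the libriaz counter for new documents.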
if(self.lc.GetItemCount()!=0 and self.codcf.GetValue()!=""):
vcntr = self.cntr
self.cntr = ""
vTIPO_ORD = self.vTIPO_ORD.GetValue()
vanno = self.anno.GetValue()
vnum_ord = int(self.num_ord.GetValue())
vdata_ord = self.data_ord.GetValue()
chiave = "RORD"
registro = vTIPO_ORD
vcod_cf = self.codcf.GetValue()
vragsoc2 = self.ragsoc2.GetValue()
vragsoc1 = self.ragsoc1.GetValue()
vindiriz = self.indiriz.GetValue()
vcap = self.cap.GetValue()
vzona = self.zona.GetValue()
vlocalit = self.localit.GetValue()
vpr = self.pr.GetValue()
vstato = self.stato.GetValue()
vcodcf1 = self.codcf1.GetValue()
vindiriz1 = self.indiriz1.GetValue()
vragsoc3 = self.ragsoc3.GetValue()
vragsoc4 = self.ragsoc4.GetValue()
vcap1 = self.cap1.GetValue()
vzona1 = self.zona1.GetValue()
vlocalit1 = self.localit1.GetValue()
vpr1 = self.pr1.GetValue()
if (vpr1=='' or vpr1=='None')and (vcodcf1!=''): vpr1 = '--'
vstato1 = self.stato1.GetValue()
vstt_ord1 = self.stt_ord1.GetValue()
vstt_ord2 = self.stt_ord2.GetValue()
vlst = int(self.lst.GetValue())
vvsord = self.vs_ord.GetValue()
vvsdata = self.vs_data.GetValue()
vdiv = self.vDIVvend.GetValue()
vcodage = self.codage.GetValue()
vPRIO = int(self.vPRIO.GetValue())
vPAGAM = self.vPAGAM.GetValue()
vconseg = self.vCONSEG.GetValue()
vtrasp = self.trasp.GetValue()
vcod_vet = self.cod_vet.GetValue()
vvsrif = self.vsrif.GetValue()
vnsrif = self.nsrif.GetValue()
vrag_ord = self.rag_ord.GetValue()
vcampo1 = self.campo1.GetValue()
vcampo2 = self.campo2.GetValue()
vnote = self.note.GetValue()
vo1_1 = vTIPO_ORD,vanno,vnum_ord,vdata_ord,vcod_cf,vragsoc1,vragsoc2
vo1_1_modi = vdata_ord,vcod_cf,vragsoc1,vragsoc2
vo1_2 = vindiriz,vcap,vzona,vlocalit,vpr,vstato,vcodcf1
vo1_3 = vragsoc3,vragsoc4,vindiriz1,vcap1,vzona1,vlocalit1,vpr1,vstato1
vo1_4 = vstt_ord1,vlst,vvsord,vvsdata
vo1_5 = vdiv,vcodage,vPRIO,vPAGAM,vconseg,vtrasp,vcod_vet
vo1_6 = vvsrif,vnsrif,vrag_ord,vcampo1,vcampo2,vnote
vo1_6_modi = vTIPO_ORD,vanno,vnum_ord
vordi1 = vo1_1 + vo1_2 + vo1_3 + vo1_4 + vo1_5 + vo1_6
vordi1_modi = vo1_1_modi + vo1_2 + vo1_3 + vo1_4 + \
vo1_5 + vo1_6
vvaspet = self.vASPET.GetValue()
vtot_colli = self.tot_colli.GetValue()
self.__MDI__.CnvPM(vtot_colli)
vtot_colli = float(self.__MDI__.val)
vtot_peso = self.tot_peso.GetValue()
self.__MDI__.CnvPM(vtot_peso)
vtot_peso = float(self.__MDI__.val)
vsc1 = 0
vsc2 = 0
vsc3 = 0
vprez_ac = self.prezzo_ac.GetValue()
self.__MDI__.CnvPM(vprez_ac)
vprez_ac = float(self.__MDI__.val)
vprez_ac1 = self.prezzo_ac1.GetValue()
self.__MDI__.CnvPM(vprez_ac1)
vprez_ac1 = float(self.__MDI__.val)
vvPDC_SC = self.vPDC_SC.GetValue()
vcampo1_calce = 0#self.campo1_calce.GetValue()
vcampo2_calce = 0#self.campo2_calce.GetValue()
vnote_calce = self.note_calce.GetValue()
vo3_1 = vvaspet,vtot_colli,vtot_peso
vo3_2 = vsc1,vsc2,vsc3,vvPDC_SC,vprez_ac,vprez_ac1
vo3_3 = vcampo1_calce,vcampo2_calce,vnote_calce
vordi3 = vo3_1 + vo3_2 + vo3_3
valueSql = vordi1 + vordi3
valueSql_modi = vordi1_modi + vordi3 + vo1_6_modi
if(vcntr=="new"):
try:
cr = self.CnAz.cursor()
sql = """ insert into ordi1
values("%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s") """
cr.execute(sql % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Save insert ordi1 Error %s" % (msg))
self.CnAz.commit()
# modifica
if self.oldnum_ord!="":
valueSql = "E", self.oldvTIPO_ORD, self.anno.GetValue(), self.oldnum_ord
try:
cr = self.CnAz.cursor()
sql1old = """ update ordi1 set stt_ord = "%s"
where tipo_ord = "%s" and anno = "%s"
and num_ord = "%s" """
cr.execute(sql1old % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Save update old ordi1 Error %s" % (msg))
self.CnAz.commit()
try:
cr = self.CnAz.cursor()
sql1old = """ update ordi2 set stt_ord = "%s"
where tipo_ord = "%s" and anno = "%s"
and num_ord = "%s" """
cr.execute(sql1old % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Save update old ordi2 Error %s" % (msg))
self.CnAz.commit()
if(vcntr=="modi"):
try:
cr = self.CnAz.cursor()
sql = """ update ordi1 set data_ord = "%s", cod_cf = "%s" ,
rag_soc1 = "%s", rag_soc2 = "%s", indiriz = "%s",
cap = "%s", zona = "%s", localit = "%s", pr = "%s",
stato = "%s", cod_dest = "%s", rag_soc3 = "%s",
rag_soc4 = "%s", indiriz1 = "%s", cap1 = "%s",
zona1 = "%s", localit1 = "%s", pr1 = "%s",
stato1 = "%s", stt_ord = "%s", lst = "%s",
vsord = "%s",vsdata = "%s", vdiv = "%s",
cod_age = "%s", prio = "%s", pagam = "%s",
conse = "%s", trasp = "%s", cod_vet = "%s",
vsrif = "%s", nsrif = "%s", rag_ord = "%s",
campo1 = "%s", campo2 = "%s", note = "%s",
aspet = "%s", colli = "%s", peso = "%s",
sc1 = "%s", sc2 = "%s", sc3 = "%s", pdc_sc = "%s",
prez_ac = "%s", prez_ac1 = "%s", campo3 = "%s",
campo4 = "%s", note1 = "%s"
where tipo_ord = "%s" and anno = "%s"
and num_ord = "%s" """
cr.execute(sql % valueSql_modi)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Save update ordi1 Error %s" % (msg))
self.CnAz.commit()
valueSql = vTIPO_ORD, vanno, int(vnum_ord)
try:
cr = self.CnAz.cursor()
sql = """ delete from ordi2
where tipo_ord = "%s" and anno = "%s"
and num_ord = "%s" """
cr.execute(sql % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Dele modi ordi2 Error %s" % (msg))
self.CnAz.commit()
nrow = self.lc.GetItemCount()
for row in range(nrow):
vcod_mag = 1
vnriga = int(self.getColTxt(row, 9))
vcodart = self.getColTxt(row, 0)
vcodbar = self.getColTxt(row, 10)
vMERCE = self.getColTxt(row, 11)
vdescriz = self.getColTxt(row, 1)
vUM = self.getColTxt(row, 7)
if vUM=='':vUM = '--'
vmis = self.getColTxt(row, 8)
vqt_ord = self.getColTxt(row, 2)
self.__MDI__.CnvPM(vqt_ord)
vqt_ord = float(self.__MDI__.val)
vqt_con = 0
vqt_eva = 0
vprez_un = self.getColTxt(row, 3)
self.__MDI__.CnvPM5(vprez_un)
vprez_un = float(self.__MDI__.val)
vprez_ag = self.getColTxt(row, 15)
self.__MDI__.CnvPM5(vprez_ag)
vprez_ag = float(self.__MDI__.val)
vtot_riga = self.getColTxt(row, 5)
self.__MDI__.CnvPM(vtot_riga)
vtot_riga = float(self.__MDI__.val)
vALIVA = self.getColTxt(row, 6)
vsc1 = self.getColTxt(row, 4)
self.__MDI__.CnvPM(vsc1)
vsc1 = float(self.__MDI__.val)
vsc2 = 0
vsc3 = 0
vprovv = self.getColTxt(row, 18)
self.__MDI__.CnvPM(vprovv)
vprovv = float(self.__MDI__.val)
vdatacons = self.getColTxt(row, 19)
vcolli = self.getColTxt(row, 20)
self.__MDI__.CnvPM(vcolli)
vcolli = float(self.__MDI__.val)
vpeso = self.getColTxt(row, 21)
self.__MDI__.CnvPM(vpeso)
vpeso = float(self.__MDI__.val)
vlst = int(self.getColTxt(row, 22))
vpdc = self.getColTxt(row, 23)
vstt_ord2 = self.getColTxt(row, 24)
vannodoc = self.getColTxt(row, 25)
vtipodoc = self.getColTxt(row, 26)
vdatadoc = self.getColTxt(row, 27)
vnumdoc = self.getColTxt(row, 28)
vcampo1_corpo = self.getColTxt(row, 29)
vcampo2_corpo = self.getColTxt(row, 30)
if self.oldnum_ord!="": vstt_ord2 ="C"
vo2_1 = vTIPO_ORD,vanno,vnum_ord,vcod_mag,vnriga,vcodart,vcodbar,vMERCE
vo2_2 = vdescriz,vUM,vmis,vqt_ord,vqt_con,vqt_eva,vprez_un,vprez_ag
vo2_3 = vtot_riga,vALIVA,vsc1,vsc2,vsc3,vprovv,vdatacons
vo2_4 = vcolli,vpeso,vlst,vpdc,vstt_ord2,vannodoc,vtipodoc,vdatadoc,vnumdoc
vo2_5 = vcampo1_corpo,vcampo2_corpo
valueSql = vo2_1 + vo2_2 + vo2_3 + vo2_4 + vo2_5
try:
cr = self.CnAz.cursor()
sql = """ insert into ordi2
values("%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s","%s","%s","%s","%s","%s","%s",
"%s","%s") """
cr.execute(sql % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Save insert ordi2 Error %s" % (msg))
self.CnAz.commit()
if vcntr!="modi" :
valueSql = vnum_ord, vdata_ord, chiave, vanno, registro
try:
cr = self.CnAz.cursor()
sql = """ update libriaz set ultnum = "%s", udatreg = "%s"
where chiave = "%s" and anno = "%s"
and registro = "%s" """
cr.execute(sql % valueSql)
except StandardError, msg:
self.__MDI__.MsgErr("ordini"," Save update libriaz Error %s" % (msg))
self.CnAz.commit()
self.OffAnagTxt(self)
self.OffArtTxt(self)
self.voktestata = 0
self.ok.Show(False)
self.oktestata.Show(False)
self.inte.Show(True)
self.inte.Enable(True)
self.new.Enable(True)
self.new.Show(True)
self.dele.Enable(False)
self.stampa.Enable(True)
self.stampa.SetFocus()
self.cntr = ""
self.oldnum_ord = ""
self.oldvTIPO_ORD = ""
else:
self.Message(cfg.msgass,self.ttl)
def StpSkAnag(self, evt):
#<daniele>
codcf = self.codcf.GetValue()
import skprint
skprint.stampaDoc(
conn = self.CnAz , #connessione
tipo = 'sanag' + self.tcpart.lower(), #tipo documento e parametro
parametriSql = (self.tcpart, codcf),
#datiazienda = self.dzDatiAzienda,
anteprima = True )
#</daniele>
#<daniele>
def Stampa(self, evt):
anno = self.anno.GetValue()
num_ord = self.num_ord.GetValue()
tipo_ord = self.vTIPO_ORD.GetValue()
tipo = self.vTIPO_ORD.GetValue()
if tipo_ord=="PC": tipo = "OC"
if tipo_ord=="PF": tipo = "OF"
import skprint
skprint.stampaDoc(
conn = self.CnAz , #connessione
tipo = tipo, #tipo documento e parametro
parametriSql = (anno,tipo_ord,num_ord),
datiazienda = self.dzDatiAzienda,
anteprima = True )
#</daniele>
# personalizza testo
def OpenTesto(self, evt):
#print "OpenTesto"
try:
import testo
except :
pass
try:
import base.testo
except :
pass
#import testo
control = [self.dtesto,self.InsTesto]
win = testo.create(self,control)
win.Centre(wx.BOTH)
win.Show(True)
# personalizza testo
def InsTesto(self, evt):
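        # Split the free text into lines and append each as a description-only row.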
val = self.dtesto.GetValue()
nriga = 0
ldescriz = [ x for x in val.split('\n') if x != '']
#ldescriz = ldescriz.reverse()
#print ldescriz
for descriz in ldescriz:
nriga+=10
self.RmpTesto(descriz.upper())
self.IntRow(self)
# personalizza testo
def RmpTesto(self, evt):
#print evt
row=0
self.row = self.lc.GetItemCount()
nriga =str((self.row+1)*10)
descriz = evt
self.lc.InsertStringItem(self.row, '')
self.lc.SetStringItem(self.row, 1, descriz)
self.lc.SetStringItem(self.row, 2, '0')
self.lc.SetStringItem(self.row, 3, '0')
self.lc.SetStringItem(self.row, 4, '0')
self.lc.SetStringItem(self.row, 5, '0')
self.lc.SetStringItem(self.row, 6, '')
self.lc.SetStringItem(self.row, 7, '')
self.lc.SetStringItem(self.row, 8, '')
self.lc.SetStringItem(self.row, 9, nriga)
self.lc.SetStringItem(self.row, 10, '')
self.lc.SetStringItem(self.row, 11, '')
self.lc.SetStringItem(self.row, 12, '0')
self.lc.SetStringItem(self.row, 13, '0')
self.lc.SetStringItem(self.row, 14, '0')
self.lc.SetStringItem(self.row, 15, '0')
self.lc.SetStringItem(self.row, 16, '0')
self.lc.SetStringItem(self.row, 17, '0')
self.lc.SetStringItem(self.row, 18, '')
self.lc.SetStringItem(self.row, 19, '')
self.lc.SetStringItem(self.row, 20, '0')
self.lc.SetStringItem(self.row, 21, '0')
self.lc.SetStringItem(self.row, 22, '1')
self.lc.SetStringItem(self.row, 23, '')
self.lc.SetStringItem(self.row, 24, '')
self.lc.SetStringItem(self.row, 25, '')
self.lc.SetStringItem(self.row, 26, '')
self.lc.SetStringItem(self.row, 27, '')
self.lc.SetStringItem(self.row, 28, '')
self.lc.SetStringItem(self.row, 29, '')
self.lc.SetStringItem(self.row, 30, '')
def is_look(self):
if (self.cntr!="new" and self.cntr!="modi"): return False
else : return True
def data_reload(self,rec,cntrp):
self.rec=rec
self.tipoord=cntrp
self.tcpart = "C"
if self.tipoord=='OF': self.tcpart = "F"
self.Start(self)
| phasis/phasis | phasis/ordi/ordini.py | Python | gpl-2.0 | 131,879 |
# coding=utf-8
import unittest
"""482. License Key Formatting
https://leetcode.com/problems/license-key-formatting/description/
You are given a license key represented as a string S which consists of only
alphanumeric characters and dashes. The string is separated into N+1 groups by
N dashes.
Given a number K, we would want to reformat the strings such that each group
contains _exactly_ K characters, except for the first group which could be
shorter than K, but still must contain at least one character. Furthermore,
there must be a dash inserted between two groups and all lowercase letters
should be converted to uppercase.
Given a non-empty string S and a number K, format the string according to the
rules described above.
**Example 1:**
**Input:** S = "5F3Z-2e-9-w", K = 4
**Output:** "5F3Z-2E9W"
**Explanation:** The string S has been split into two parts, each part has 4 characters.
Note that the two extra dashes are not needed and can be removed.
**Example 2:**
**Input:** S = "2-5g-3-J", K = 2
**Output:** "2-5G-3J"
**Explanation:** The string S has been split into three parts, each part has 2 characters except the first part as it could be shorter as mentioned above.
**Note:**
1. The length of string S will not exceed 12,000, and K is a positive integer.
2. String S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9) and dashes(-).
3. String S is non-empty.
Similar Questions:
"""
class Solution(object):
    def licenseKeyFormatting(self, S, K):
        """
        :type S: str
        :type K: int
        :rtype: str
        """
        # Walk the cleaned, uppercased key backwards in chunks of K; only the leading group may be shorter.
        s = S.replace('-', '').upper()
        groups = []
        for i in range(len(s), 0, -K):
            groups.append(s[max(0, i - K):i])
        return '-'.join(reversed(groups))

    def test(self):
        assert self.licenseKeyFormatting("5F3Z-2e-9-w", 4) == "5F3Z-2E9W"
        assert self.licenseKeyFormatting("2-5g-3-J", 2) == "2-5G-3J"
if __name__ == "__main__":
unittest.main()
| openqt/algorithms | leetcode/python/lc482-license-key-formatting.py | Python | gpl-3.0 | 1,788 |
class World:
    """Characters from which the world map is built."""
    EXIT = 'W'
    """Character placed on the map marking the field with the exit."""
    CAVE = 'J'
    """Character placed on the map marking a field with a pit."""
    EMPTY = '.'
    """Character placed on the map marking an empty field."""
| uHappyLogic/lost-wumpus | world.py | Python | mit | 289 |
#=============================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>'''
#
# Date: 2015
#
#===============================================================================
from MOSFoundation import *  # the HOS*/MOS* helpers below are called unqualified
def app2():
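    # Configure the analog input and pin 3 as a digital output, then poll the sensor.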
HOSAnalogSetup()
HOSDigitalSetup(3)
HOSDigitalListen(2,buttonListener)
while(True):
MOSSleep (1000)
result = HOSAnalogRead(0)
MOSSleep (1000)
def buttonListener():
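    # Blink the output on pin 3 four times when the button on pin 2 fires.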
for x in range (4):
HOSDigitalOn(3)
MOSSleep(500)
HOSDigitalOff(3)
MOSSleep (500)
| hdlj/MongooseOS | Resource/Example2/app2.py | Python | gpl-3.0 | 1,153 |
from pprint import pprint as pp
from RMPY import RMRigTools
from RMPY import nameConvention
from RMPY import RMRigShapeControls
import re
import pymel.core as pm
from RMPY.AutoRig.Hand import RMGenericHandStructure
class RMGenericHandRig(object):
def __init__(self, NameConv=None):
if not NameConv:
self.NameConv = nameConvention.NameConvention()
else:
self.NameConv = NameConv
self.GHS = RMGenericHandStructure.GenericHandJointStructure(NameConv=NameConv)
self.name_conv = RMRigTools.RMRigTools(NameConv=NameConv)
self.fingerRoot = {
"middle": None,
"ring": None,
"pinky": None,
"index": None,
"thumb": None
}
self.PalmReferencePoints = {
"middle": None,
"ring": None,
"pinky": None,
"index": None,
"thumb": None
}
self.PalmFingerControlGrp = {
"middle": None,
"ring": None,
"pinky": None,
"index": None,
"thumb": None
}
self.PalmResetPoint = None
self.PalmControl = None
self.fingerControlsReset = []
self.fingerContols = []
self.MainKinematics = None
self.MainControl = None
def CreateHandRig(self, PalmReferencePoint, PalmControl=None):
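        # Build the palm/finger joint structure, rig every finger with a
        # square control, then wire the palm control and constrain the rig
        # to the main kinematics node.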
self.CreateHandStructure(PalmReferencePoint)
for fingers in self.GHS.fingers:
self.CreateFingerSquareRig(fingers)
self.CreatePalmRig(PalmControl=PalmControl)
RMRigTools.RMParentArray(self.PalmControl, self.fingerControlsReset)
palmLen = RMRigTools.RMPointDistance(self.PalmControl, self.GHS.fingerRoots[0])
pm.parentConstraint(self.MainKinematics, self.GHS.palmJoint)
pm.parentConstraint(self.MainKinematics, self.MainControl)
self.NameConv.rename_set_from_name(self.GHS.palmJoint, "sknjnt", "objectType")
for eachFinger in self.GHS.fingers:
self.NameConv.rename_set_from_name(eachFinger, "sknjnt", "objectType")
# self.PalmControl
# RMRigShapeControls.create_box_ctrl(self.GHS.palmJoint, Yratio = .5, size = palmLen, NameConv = NameConv)
def CreateHandStructure(self, PalmReferencePoint):
self.GHS.CreateHandJointStructure(PalmReferencePoint)
self.IdentifyJoints(self.GHS.fingerRoots)
def CreatePalmRig(self, PalmControl=None):
if self.NameConv.get_from_name(self.GHS.palmJoint, "side") == "L":
sideVariation = -1
else:
sideVariation = 1
self.CreatePalmReferencePoints()
        if PalmControl is None:
palmResetPoint, PalmControl = RMRigShapeControls.RMCircularControl(self.GHS.palmJoint)
else:
palmResetPoint = pm.group(empty=True, name="palmControl")
self.NameConv.rename_based_on_base_name(self.GHS.palmJoint, palmResetPoint,
name=palmResetPoint)
RMRigTools.RMAlign(self.GHS.palmJoint, palmResetPoint, 3)
self.PalmControl = PalmControl
self.PalmResetPoint = palmResetPoint
self.MainControl = palmResetPoint
# palmResetPoint = self.NameConv.RMRenameSetFromName(palmResetPoint,"palmControls","Name")
self.RMaddPalmControls(self.PalmControl)
RMRigTools.RMLockAndHideAttributes(self.PalmControl, "0000000000")
pinky = self.GHS.fingerJointsByName("pinky")
if pinky:
self.PalmFingerControlGrp["pinky"] = self.name_conv.RMCreateGroupOnObj(pinky[0])
RMRigTools.RMChangeRotateOrder(pinky, "yxz")
RMRigTools.RMConnectWithLimits("%s.Spread" % self.PalmControl,
'%s.rotateZ' % self.PalmFingerControlGrp["pinky"],
[[-10, sideVariation * 10], [0, 0], [10, sideVariation * -60]])
ring = self.GHS.fingerJointsByName("ring")
if ring:
self.PalmFingerControlGrp["ring"] = self.name_conv.RMCreateGroupOnObj(ring[0])
RMRigTools.RMChangeRotateOrder(ring, "yxz")
RMRigTools.RMConnectWithLimits("%s.Spread" % self.PalmControl,
'%s.rotateZ' % self.PalmFingerControlGrp["ring"],
[[-10, sideVariation * 5], [0, 0], [10, sideVariation * -30]])
middle = self.GHS.fingerJointsByName("middle")
if middle:
self.PalmFingerControlGrp["middle"] = self.name_conv.RMCreateGroupOnObj(middle[0])
RMRigTools.RMChangeRotateOrder(middle, "yxz")
RMRigTools.RMConnectWithLimits("%s.Spread" % self.PalmControl,
'%s.rotateZ' % self.PalmFingerControlGrp["middle"],
[[-10, 0], [0, 0], [10, sideVariation * -5]])
index = self.GHS.fingerJointsByName("index")
if index:
self.PalmFingerControlGrp["index"] = self.name_conv.RMCreateGroupOnObj(index[0])
RMRigTools.RMChangeRotateOrder(index, "yxz")
RMRigTools.RMConnectWithLimits("%s.Spread" % self.PalmControl,
'%s.rotateZ' % self.PalmFingerControlGrp["index"],
[[-10, sideVariation * -5], [0, 0], [10, sideVariation * 30]])
thumb = self.GHS.fingerJointsByName("thumb")
if thumb:
self.PalmFingerControlGrp["thumb"] = self.name_conv.RMCreateGroupOnObj(thumb[0])
RMRigTools.RMChangeRotateOrder(thumb, "yxz")
RMRigTools.RMConnectWithLimits("%s.Spread" % self.PalmControl,
'%s.rotateZ' % self.PalmFingerControlGrp["thumb"],
[[-10, sideVariation * -10], [0, 0], [10, sideVariation * 60]])
for eachFingerName in self.fingerRoot:
if eachFingerName != 'thumb':
RMRigTools.RMConnectWithLimits("%s.PalmBend" % self.PalmControl,
'%s.rotateY' % self.PalmFingerControlGrp[eachFingerName],
[[-10, 90], [0, 0], [10, -90]])
RMRigTools.RMConnectWithLimits("%s.Twist" % self.PalmControl,
'%s.rotateX' % self.PalmReferencePoints[eachFingerName],
[[-10, sideVariation * 45], [0, 0], [10, sideVariation * -45]])
RMRigTools.RMConnectWithLimits("%s.PalmCup" % self.PalmControl, '%s.rotateX' % self.PalmReferencePoints["pinky"],
[[0, 0], [10, sideVariation * 50]])
RMRigTools.RMConnectWithLimits("%s.PalmCup" % self.PalmControl, '%s.rotateX' % self.PalmReferencePoints["ring"],
[[0, 0], [10, sideVariation * 25]])
RMRigTools.RMConnectWithLimits("%s.PalmCup" % self.PalmControl, '%s.rotateX' % self.PalmReferencePoints["middle"],
[[0, 0], [10, sideVariation * 5]])
RMRigTools.RMConnectWithLimits("%s.PalmCup" % self.PalmControl, '%s.rotateX' % self.PalmReferencePoints["index"],
[[0, 0], [10, sideVariation * -30]])
RMRigTools.RMConnectWithLimits("%s.PalmCup" % self.PalmControl, '%s.rotateX' % self.PalmReferencePoints["thumb"],
[[0, 0], [10, sideVariation * -60]])
def CreatePalmReferencePoints(self):
HandPalm = self.name_conv.RMCreateGroupOnObj(self.GHS.palmJoint, Type="world")
for keys in self.fingerRoot:
childGroup = self.name_conv.RMCreateGroupOnObj(HandPalm, Type="child")
self.NameConv.rename_set_from_name(childGroup, keys, 'name', mode='add')
pm.parentConstraint(childGroup, self.fingerRoot[keys], maintainOffset=True)
self.PalmReferencePoints[keys] = childGroup
self.MainKinematics = HandPalm
def CreateFingerSquareRig(self, Finger):
if self.NameConv.get_from_name(Finger[0], "side") == "L":
sideVariation = 1
else:
sideVariation = -1
BoxResetPoint, BoxControl = RMRigShapeControls.RMCreateBoxCtrl(Finger[len(Finger) - 1], ParentBaseSize=True,
Xratio=.5, Yratio=.5, Zratio=.5)
self.RMaddFinguerControls(BoxControl)
pm.makeIdentity(BoxControl, apply=True, r=False, t=True, s=True, n=0)
pm.parentConstraint(Finger[len(Finger) - 1], BoxResetPoint)
RMRigTools.RMLockAndHideAttributes(BoxControl, "0000000000")
RMRigTools.RMConnectWithLimits("%s.MidUD" % BoxControl, "%s.rotateY" % Finger[0],
[[-10, 100], [0, 0], [10, -100]])
RMRigTools.RMConnectWithLimits("%s.MidLR" % BoxControl, "%s.rotateZ" % Finger[0],
[[-10, sideVariation * 120], [0, 0], [10, sideVariation * -127]])
RMRigTools.RMConnectWithLimits("%s.MidTwist" % BoxControl, "%s.rotateX" % Finger[0],
[[-10, sideVariation * 90], [0, 0], [10, sideVariation * -90]])
index = 1
for eachjoint in range(0, len(Finger) - 1):
RMRigTools.RMConnectWithLimits("%s.UD%s" %(BoxControl, index), "%s.rotateY" % Finger[eachjoint],
[[-10, 100], [0, 0], [10, -100]])
RMRigTools.RMConnectWithLimits("%s.LR%s" % (BoxControl, index), "%s.rotateZ" % Finger[eachjoint],
[[-10, sideVariation * 120], [0, 0], [10, sideVariation * -127]])
RMRigTools.RMConnectWithLimits("%s.Twist%s" % (BoxControl, index), "%s.rotateX" % Finger[eachjoint],
[[-10, sideVariation * 90], [0, 0], [10, sideVariation * -90]])
index += 1
self.fingerControlsReset.append(BoxResetPoint)
self.fingerContols.append(BoxControl)
def IdentifyJoints(self, fingerRootArray):
for fingers in self.fingerRoot:
for Roots in fingerRootArray:
if re.search('%s' % fingers, '%s' % Roots):
self.fingerRoot[fingers] = Roots
def IdentifyByString(self, IDstring, fingerList):
for eachFinger in fingerList:
if re.search(IDstring, eachFinger):
return eachFinger
def RMaddFinguerControls(self, Object):
pm.addAttr(Object, at="float", ln="MidUD", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="UD1", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="UD2", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="UD3", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="MidLR", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="LR1", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="LR2", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="LR3", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="MidTwist", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="Twist1", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="Twist2", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="Twist3", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="enum", ln="Secondary", k=1, en="Off:On")
def RMaddPalmControls(self, Object):
pm.addAttr(Object, at="float", ln="PalmBend", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="PalmCup", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="Spread", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
pm.addAttr(Object, at="float", ln="Twist", hnv=1, hxv=1, h=0, k=1, smn=-10, smx=10)
| rendermotion/RMPY | AutoRig/Hand/RMGenericHandRig.py | Python | lgpl-3.0 | 12,164 |
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class of library packagers.
This is the abstract base class for modules that can emit a package of files.
The two intended implementations are Zip files and direct to the file system.
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import os
class LibraryPackage(object):
"""The library package."""
def __init__(self):
"""Create a new LibraryPackage."""
self._file_path_prefix = ''
def StartFile(self, name):
"""Start writing a named file to the package.
Subclasses must implement this.
Args:
name: (str) path which will identify the contents in the archive.
Returns:
A file-like object to write the contents to.
"""
pass
def EndFile(self):
"""Flush the current output file to the package container.
Subclasses must implement this.
"""
pass
def DoneWritingArchive(self):
"""Signal that we are done writing the entire package.
Subclasses may implement this if required.
This method must be called to flush remaining data to the output stream.
"""
pass
def IncludeFile(self, path, name):
"""Read a file from disk into the archive.
Args:
path: (str) path to the file.
name: (str) name the file should have in the archive.
"""
output_stream = self.StartFile(name)
input_stream = open(path, 'r')
output_stream.write(input_stream.read())
input_stream.close()
self.EndFile()
def IncludeManyFiles(self, paths, strip_prefix='', new_prefix=None):
"""Include a list of many files.
Args:
paths: (list) list of paths to real files.
strip_prefix: The common prefix to strip from each file.
new_prefix: The replacement for the stripped prefix.
Raises:
      ValueError: if any of the paths do not begin with strip_prefix.
"""
for path in paths:
base = path[:len(strip_prefix)]
name = path[len(strip_prefix):]
if strip_prefix != base:
raise ValueError('path: %s did not begin with %s' % (
path, strip_prefix))
if new_prefix:
name = os.path.join(new_prefix, name)
self.IncludeFile(path, name)
def SetFilePathPrefix(self, path):
"""Set a prefix to be prepended to any file names."""
if not path.endswith('/'):
path = '%s/' % path
self._file_path_prefix = path
def IncludeMinimalJarManifest(self,
created_by='1.0.0-google-v1 (Google Inc.)'):
"""Add a minimal MANIFEST.MF, to turn this into a JAR file."""
out = self.StartFile('META-INF/MANIFEST.MF')
out.write('Manifest-Version: 1.0\r\n')
out.write('Created-By: ')
out.write(created_by)
out.write('\r\n\r\n')
self.EndFile()
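# Minimal concrete packager sketch (illustrative only; the intended
# implementations are the Zip and filesystem packagers mentioned above).
# Assumes in-memory io.StringIO buffers are an acceptable stand-in for
# real output streams.
import io
class InMemoryPackage(LibraryPackage):
  """Example packager that buffers each file in an in-memory stream."""
  def __init__(self):
    LibraryPackage.__init__(self)
    self._files = {}  # archive name -> io.StringIO buffer
    self._buf = None
  def StartFile(self, name):
    # Honor the optional path prefix, as SetFilePathPrefix documents.
    self._buf = io.StringIO()
    self._files[self._file_path_prefix + name] = self._buf
    return self._buf
  def EndFile(self):
    self._buf = None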
| mashery/io-wraps | google-apis-client-generator/src/googleapis/codegen/library_package.py | Python | mit | 3,328 |
#!/usr/bin/python
from __future__ import print_function
import sys
from time import sleep, time
from argparse import ArgumentParser
import os
import sqlite3
from datetime import datetime
import errno
import socket
from select import select
import traceback
from collections import deque, defaultdict, namedtuple
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from spidev import SpiDev
import RPi.GPIO as GPIO
from nrf24 import NRF24
import requests
import json
PIPES = ([0xe7, 0xe7, 0xe7, 0xe7, 0xe7], [0xc2, 0xc2, 0xc2, 0xc2, 0xc2])
CHANNEL = 0x20
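# Radio command byte layout shared by Boiler and Controller below:
#   bits 7..2 = relay pin index, bit 1 = query flag, bit 0 = on/off state.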
class Button(object):
def __init__(self, pins):
self.pins = pins
self.states = {}
self.events = Queue()
for i, pin in enumerate(self.pins):
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.add_event,
bouncetime=500)
self.states[pin] = i
def add_event(self, channel):
self.events.put(self.states[channel])
class Relay(object):
def __init__(self, pins):
self.pins = pins
self.states = []
for pin in self.pins:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)
self.states.append(0)
def output(self, pin, state):
print("setting pin", pin, state and "on" or "off")
self.states[pin] = state
GPIO.output(self.pins[pin], not state) # These devices are active-low.
def state(self, pin):
return self.states[pin]
def cleanup(self):
pass # this will be done later: GPIO.cleanup()
class Temperature(object):
def __init__(self, major=0, minor=0):
self.spi = SpiDev()
self.spi.open(major, minor)
def rawread(self):
return self.spi.xfer2([0, 0])
def read(self):
return self.calc_temp(self.rawread())
@staticmethod
def calc_temp(buf):
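        # Two SPI bytes hold a left-justified 13-bit reading; one LSB is
        # 0.0625 degC (consistent with a TC77-style sensor).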
return (((buf[0] << 8) | buf[1]) >> 3) * 0.0625
def cleanup(self):
self.spi.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.cleanup()
class Boiler(object):
def __init__(self, major, minor, ce_pin, irq_pin, temperature, relay, button):
self.relay = relay
self.temperature = temperature
self.button = button
self.radio = NRF24()
self.radio.begin(major, minor, ce_pin, irq_pin)
self.radio.setDataRate(self.radio.BR_250KBPS)
self.radio.setChannel(CHANNEL)
self.radio.setAutoAck(1)
self.radio.enableDynamicPayloads()
self.radio.printDetails()
self.radio.openWritingPipe(PIPES[0])
self.radio.openReadingPipe(1, PIPES[1])
def run(self):
while True:
try:
recv_buffer = self.recv(10)
print("recv_buffer", recv_buffer, "temp", self.temperature.read())
while True:
try:
event = self.button.events.get_nowait()
except Empty:
break
else:
recv_buffer.append(event) # pin = 0, query = 0, state = event
for byte in recv_buffer:
pin = byte >> 2
query = byte >> 1 & 1
state = byte & 1
print("pin", pin, "query", query, "state", state)
if query:
self.radio.write([self.relay.state(pin)])
else:
self.relay.output(pin, state)
start = time()
result = self.radio.write(self.temperature.rawread())
if not result:
print(datetime.now(), "Did not receive ACK from controller after", time() - start, "seconds:", self.radio.last_error)
arc = self.radio.read_register(self.radio.OBSERVE_TX)
if result and arc & 0xf != 0:
print("Last TX succeeded in", arc & 0xf, "retransmissions.")
sys.stdout.flush()
except Exception as exc:
print(exc)
def recv(self, timeout=None):
end = time() + timeout
pipe = [0]
self.radio.startListening()
try:
while not self.radio.available(pipe) and (timeout is None or time() < end):
sleep(10000 / 1e6)
if self.radio.available(pipe):
recv_buffer = []
self.radio.read(recv_buffer)
return recv_buffer
return []
finally:
self.radio.stopListening()
def cleanup(self):
self.radio.end()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.cleanup()
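# A scheduled one-shot action: set relay `pin` to `state` once `metric`
# ('temp' or 'time') reaches `value`; consumed in Controller.run below.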
action = namedtuple('action', 'metric value pin state')
class Controller(object):
def __init__(self, major, minor, ce_pin, irq_pin, temperature, db, sock, relay):
self.temperature = temperature
self.db = db
self.sock = sock
self.relay = relay
self.actions = []
self.radio = NRF24()
self.radio.begin(major, minor, ce_pin, irq_pin)
self.radio.setDataRate(self.radio.BR_250KBPS)
self.radio.setChannel(CHANNEL)
self.radio.setAutoAck(1)
self.radio.enableDynamicPayloads()
self.radio.printDetails()
self.radio.openWritingPipe(PIPES[0])
self.radio.openReadingPipe(1, PIPES[1])
def run(self):
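        # Text protocol on the control socket, one request per line:
        #   "on <pin>" / "off <pin>"       set a relay
        #   "query <pin>"                  report a relay state
        #   "queryactions <pin>"           list pending scheduled actions
        #   "boost <pin> temp <value>"     heat until the temperature is reached
        #   "boost <pin> time <seconds>"   heat for a fixed duration
        # Negative pins address the local relay; others go over the radio.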
try:
tick = time()
while True:
recv_buffer = self.recv(10, rfds=[self.sock])
if recv_buffer and len(recv_buffer) == 2:
self.db.write(1, self.temperature.calc_temp(recv_buffer))
if tick < time():
tick = time() + 10
temp = self.temperature.read()
self.db.write(0, temp)
for i, (metric, value, pin, state) in enumerate(sorted(self.actions)):
if metric == 'temp' and temp >= value or \
metric == 'time' and time() >= value:
del self.actions[i]
result = self.control(pin, state)
print('\n', datetime.now(), "action matched:", metric, value, pin, state, "=>", result)
if not result:
print('action failed, will retry in 10s.')
self.actions.append(action(metric, value, pin, state))
break
try:
conn, _ = self.sock.accept()
except socket.error as exc:
if exc.errno != errno.EAGAIN:
raise
else:
try:
conn.settimeout(10)
recv_line = conn.recv(1024)
args = recv_line[:-1].split(None, 2)
if len(args) > 2:
state, pin, arg = args
pin = int(pin)
if state == 'boost':
args = arg.split()
if len(args) == 2:
metric, value = args
value = float(value)
if metric == 'temp' and temp >= value:
conn.sendall('temperature already above target!\n')
continue
if metric == 'time' and value <= 0:
conn.sendall('time delta must be positive!\n')
continue
if metric == 'time':
value += time()
self.actions.append(action(metric, value, pin, 'off'))
print('\n', datetime.now(), "added action", self.actions)
state = 'on' # continue to turn the boiler on
else:
state, pin = args
pin = int(pin)
if state.lower() in ('on', 'off'):
result = self.control(pin, state)
recv_buffer = '' # Need to clear buffer each time through the loop.
if state.lower() == 'query':
result, recv_buffer = self.state(pin)
elif state.lower() == 'queryactions':
result = True
recv_buffer = str(self.actions)
if isinstance(recv_buffer, list):
if not recv_buffer:
recv_buffer = ''
elif len(recv_buffer) == 1:
recv_buffer = recv_buffer[0]
conn.sendall('%s %s\n' % ('OK' if result else 'timed out', recv_buffer))
except Exception as exc:
print()
print('\n', datetime.now(), "Exception while processing:", repr(recv_line))
traceback.print_exc()
if self.radio.last_error:
print("Last radio error: %r" % self.radio.last_error)
try:
conn.sendall('invalid request: {!s}\n'.format(exc))
except socket.error:
pass
finally:
conn.close()
except KeyboardInterrupt:
print()
def state(self, pin):
if pin < 0:
return True, self.relay.state(-pin - 1)
else:
if self.control(pin, 'query'):
recv_buffer = self.recv(1)
return len(recv_buffer) > 0, recv_buffer
print("control returned not True: %r" % self.radio.last_error)
return False, []
def control(self, pin, state):
if pin < 0:
self.relay.output(-pin - 1, state.lower() == 'on')
return True
else:
cmd = pin << 2 | (state.lower() == 'query') << 1 | (state.lower() == 'on')
return self.radio.write(chr(cmd))
def recv(self, timeout=None, rfds=None):
if rfds is None:
rfds = []
end = time() + (timeout or 0.0)
pipe = [0]
self.radio.startListening()
try:
while not self.radio.available(pipe) and (timeout is None or time() < end):
#sleep(10000 / 1e6)
r, _, _ = select(rfds, [], [], 10000 / 1e6)
if r:
return []
if self.radio.available(pipe):
recv_buffer = []
self.radio.read(recv_buffer)
return recv_buffer
return []
finally:
self.radio.stopListening()
def cleanup(self):
self.radio.end()
self.db.close()
self.temperature.cleanup()
self.sock.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.cleanup()
def tridian(mylist, sum=sum, sorted=sorted):
"""Optimised median function. Assumes delta is 21."""
return sum(sorted(mylist)[7:14]) / 7.
def tridian_slow(mylist):
    """Unoptimised middle-third mean; works for any window size."""
    sorts = sorted(mylist)
    tri = len(sorts) // 3  # floor division keeps the slice indices integral on Python 3
    return sum(sorts[tri:2 * tri]) / float(tri)
class DBWriter(object):
def __init__(self):
self.buf = defaultdict(deque)
self.con = sqlite3.connect('/var/lib/autoboiler/autoboiler.sqlite3')
self.con.isolation_level = None
self.cur = self.con.cursor()
self.cur.execute('''CREATE TABLE IF NOT EXISTS temperature
(date datetime, sensor integer, temperature real)''')
self.cur.execute('''CREATE TABLE IF NOT EXISTS temperature_raw
(date datetime, sensor integer, temperature real)''')
self.cur.execute('''CREATE INDEX IF NOT EXISTS temperature_raw_sensor_date
ON temperature_raw(sensor, date)''')
self.cur.execute('''CREATE INDEX IF NOT EXISTS temperature_sensor_date
ON temperature(sensor, date)''')
def write(self, idx, value):
data = (datetime.now(), idx, value)
line = "%s %d %f" % data
if idx > 0:
print('\033[%dC' % len(line) * idx, end='')
print(line, '\r', end='')
sys.stdout.flush()
self.buf[idx].append(data)
try:
self.cur.execute('insert into temperature_raw values (?, ?, ?)',
data)
res = requests.post('http://emonpi/emoncms/input/post.json?node=1&apikey=74f0ab98df349fdfd17559978fb1d4b9',
data={'data': json.dumps({'T{}raw'.format(idx): value})})
if len(self.buf[idx]) >= 21:
# Take the middle-ish value to use for the time.
data = (self.buf[idx][10][0], idx, tridian([x[2] for x in self.buf[idx]]))
self.buf[idx].popleft()
self.cur.execute('insert into temperature values (?, ?, ?)',
data)
requests.post('http://emonpi/emoncms/input/post.json?node=1&apikey=74f0ab98df349fdfd17559978fb1d4b9',
data={'data': json.dumps({'T{}'.format(idx): value})})
except (requests.exceptions.ConnectionError, sqlite3.OperationalError) as exc:
print('\n', exc)
def close(self):
self.con.commit()
self.cur.close()
self.con.close()
def main():
GPIO.setmode(GPIO.BCM)
parser = ArgumentParser()
parser.add_argument('--mode', required=True, choices=['boiler', 'controller'])
parser.add_argument('--pidfile', '-p', default='/var/run/autoboiler.pid')
parser.add_argument('--sock', '-s', default='/var/lib/autoboiler/autoboiler.socket')
parser.add_argument('--output', '-o')
args = parser.parse_args()
if args.output:
f = open(args.output, 'a+')
if f:
sys.stdout = f
if args.pidfile:
with open(args.pidfile, 'w') as f:
print(os.getpid(), file=f)
try:
if args.mode == 'boiler':
with Boiler(0, 0, 25, 24, Temperature(0, 1), Relay([17, 18]), Button([23, 24])) as radio:
radio.run()
elif args.mode == 'controller':
try:
os.unlink(args.sock)
except OSError as exc:
if exc.errno != errno.ENOENT and os.path.exists(args.sock):
raise
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(args.sock)
os.chmod(args.sock, 0o777)
sock.setblocking(0)
sock.listen(1)
with Controller(0, 1, 25, 24, Temperature(0, 0), DBWriter(), sock, Relay([15, 14])) as radio:
radio.run()
finally:
GPIO.cleanup()
if args.pidfile:
os.unlink(args.pidfile)
if args.sock and args.mode == 'controller':
try:
os.unlink(args.sock)
except OSError as exc:
if exc.errno != errno.ENOENT and os.path.exists(args.sock):
raise
return 0
if __name__ == '__main__':
sys.exit(main())
# vim: set et sw=4 ts=4 sts=4 ai:
| bwduncan/autoboiler | autoboiler.py | Python | agpl-3.0 | 15,780 |
from know_me import models
from know_me.serializers import subscription_serializers
def test_serialize_inactive():
"""
If an inactive subscription is serialized, ``is_active`` should be
``False`` and all other fields should be ``None``.
"""
subscription = models.Subscription(is_active=False)
serializer = subscription_serializers.SubscriptionSerializer(subscription)
assert serializer.data == {
"apple_receipt": None,
"is_active": False,
"is_legacy_subscription": False,
}
def test_serialize_apple_receipt(apple_receipt_factory):
"""
If a subscription backed by an Apple receipt is serialized, it
should return information about the Apple receipt.
"""
receipt = apple_receipt_factory(subscription__is_active=True)
serializer = subscription_serializers.SubscriptionSerializer(
receipt.subscription
)
# Child serializers
receipt_serializer = subscription_serializers.AppleReceiptInfoSerializer(
receipt
)
assert serializer.data == {
"apple_receipt": receipt_serializer.data,
"is_active": True,
"is_legacy_subscription": False,
}
def test_serialize_legacy(subscription_factory):
"""
If a subscription is marked as a legacy subscription, it should
include a flag indicating that.
"""
subscription = subscription_factory(is_legacy_subscription=True)
serializer = subscription_serializers.SubscriptionSerializer(subscription)
assert serializer.data == {
"apple_receipt": None,
"is_active": subscription.is_active,
"is_legacy_subscription": True,
}
| knowmetools/km-api | km_api/know_me/journal/tests/serializers/test_subscription_serializer.py | Python | apache-2.0 | 1,653 |