"""
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super().save(*args, **kwargs)
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
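# Illustrative sketch (an assumption, not part of the original test models):
# these models are exercised through inline ModelAdmin configurations. The
# helper below only builds such a configuration when called, so importing
# this module stays side-effect free; the class names are hypothetical.
def _example_inline_admin():
    from django.contrib import admin

    class NonAutoPKBookInline(admin.TabularInline):
        model = NonAutoPKBook

    class AuthorAdmin(admin.ModelAdmin):
        inlines = [NonAutoPKBookInline]

    return AuthorAdmin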
#! /usr/bin/env python
import MySQLdb as mdb
import os
import sys
import logging
from operator import itemgetter
import warnings
import cPickle as pk
# The assumption is that the table that is being written to is cleared!
tardir = '../data/ast/'
logdir = '../log/populatedb/'
USESKIPLIST = True
#problemList = [(1,1),(1,2),(1,3),(2,6),(4,4)]
maxEntries = 300
MAXQUEUESIZE = 100
dbread = {}
dbread['Server'] = 'evariste'
dbread['User'] = 'codewebdb'
dbread['Pwd'] = 'n3gr0n1'
dbread['Name'] = 'codewebdb'
dbread['TableName'] = 'original_submissions'
dbwrite = {}
dbwrite['Server'] = 'evariste'
dbwrite['User'] = 'codewebdb'
dbwrite['Pwd'] = 'n3gr0n1'
dbwrite['Name'] = 'codewebdb'
dbwrite['TableName'] = 'octave'
class MultiInserter(object):
def __init__(self,db,maxQueueSize):
self.queue = []
print('Opening database: ' + db['Name'] + '.')
self.con = mdb.connect(db['Server'],db['User'],db['Pwd'],db['Name'])
self.cur = self.con.cursor()
self.db = db
self.maxQueueSize = maxQueueSize
def __del__(self):
print('Flushing...')
self.flush()
print('Closing database: ' + self.db['Name'] + '.')
if self.con:
# close the cursor before its parent connection
self.cur.close()
self.con.close()
def add(self, dbentry):
dbentryTuple = (dbentry['hw_id'],dbentry['part_id'],dbentry['ast_id'],dbentry['codestr'],str(dbentry['idlist']),dbentry['jsonstr'],dbentry['mapstr'],dbentry['output'],dbentry['correct'])
self.queue.append(dbentryTuple)
if len(self.queue) == self.maxQueueSize:
self.flush()
def flush(self):
with warnings.catch_warnings():
warnings.simplefilter('error', mdb.Warning)
try:
self.cur.executemany("""INSERT INTO """ + self.db['TableName'] + """ (homework_id,part_id,ast_id,code,coursera_submission_ids,json,map,output,correct) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""",self.queue)
self.con.commit()
except mdb.Error, e:
raise e
self.queue = []
def opendb(db):
print('Opening database: ' + db['Name'] + '.')
con = mdb.connect(db['Server'],db['User'],db['Pwd'],db['Name'])
cur = con.cursor()
db['connection'] = (con,cur)
def closedb(db):
print('Closing database: ' + db['Name'] + '.')
(con,cur) = db['connection']
if con:
# close the cursor before its parent connection
cur.close()
con.close()
def grabOutput(db, submissionids):
(con,cur) = db['connection']
corrects = [0, 0]
outputs = {}
for subid in submissionids:
cur.execute("SELECT output, raw_score FROM " + db['TableName'] + " WHERE id = %s", (subid,))
r = cur.fetchone()
try:
outputs[r[0]] += 1
except KeyError:
outputs[r[0]] = 1
corrects[int(int(r[1])>0)] += 1
correct = int(corrects[0] < corrects[1])
output = max(outputs.iteritems(), key = itemgetter(1))[0]
count = correct
return output, correct, count
def printEntry(dbentry):
print('Homework id: ' + str(dbentry['hw_id']))
print('Part id: ' + str(dbentry['part_id']))
print('Correct: ' + str(dbentry['correct']))
print('Number of submissions: ' + str(len(dbentry['idlist'])))
def loadSubmissionsFile(fname):
submissionids = []
fid = open(fname)
rows = fid.readlines()
for r in rows[2:]:
tmp = r.split(':')
astindex = int(tmp[0])
numsubmissions = int(tmp[1])
idlist = [int(x) for x in tmp[2].split(',')[:-1]]
submissionids.append(idlist)
fid.close()
return submissionids
def loadTextFile(fname):
return open(fname).read()
def loadSkipList(fname):
try:
fid = open(fname,'r')
except IOError:
fid = open(fname,'wt')
fid.write('')
fid.close()
return []
rows = fid.readlines()
fid.close()
pList = []
for r in rows:
pList.append(tuple([int(x) for x in r.rstrip(' \n').split()]))
return pList
def logHwPart(hw_id,part_id,fname):
fid = open(fname,'a')
fid.write(str(hw_id) + ' ' + str(part_id) + '\n')
fid.close()
def report(dbentry,ast_id,numUniqueAST):
rpt = '\n+------------------------------------------------\n' \
+ 'Homework: ' + str(dbentry['hw_id']) + ', Part: ' + str(dbentry['part_id']) + '\n' \
+ 'On AST #' + str(ast_id) + ' of ' + str(numUniqueAST) + '\n' \
+ 'Number of matching submissions: ' + str(len(dbentry['idlist'])) + '\n'
return rpt
def run(writeToTestDB):
logfilename = logdir + 'log'
logging.basicConfig(filename = logfilename, format='%(asctime)s %(message)s',\
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
coarselogfilename = logdir + 'coarselog'
skipList = loadSkipList(coarselogfilename)
# list all tar.gz files in directory
allfiles = []
problems = []
for tarfile in os.listdir(tardir):
if tarfile.endswith('.tar.gz'):
try:
(_,h,p) = tarfile[:(-7)].split('_')
problems.append((h,p))
allfiles.append(tardir + tarfile)
except ValueError:
print('Warning: Unpacking failed for ' + tarfile + ' !!')
# open database connections
Inserter = MultiInserter(dbwrite,MAXQUEUESIZE)
opendb(dbread)
# iterate through files
for tarfile, prob in zip(allfiles,problems):
# filter out problems that we don't want to expand
(hw_id,part_id) = [int(x) for x in prob]
if USESKIPLIST and (hw_id,part_id) in skipList:
continue
print('Untarring Homework: ' + str(hw_id) + ', Problem: ' + str(part_id))
dirname = tarfile[:(-7)]
if not os.path.isdir(os.path.join(tardir,dirname)):
os.system('tar -xzf ' + tarfile + ' -C ' + tardir)
submissionsfile = dirname + '/submissionids.dat'
submissionIDs = loadSubmissionsFile(submissionsfile)
# iterate through each ast id
for idlist,ast_id in zip(submissionIDs,range(len(submissionIDs))):
if ast_id % 100 == 0:
print(str(ast_id) + ' of ' + str(len(submissionIDs)))
if writeToTestDB == True and ast_id >= maxEntries:
break
# load json, map and code files
fname_prefix = dirname + '/ast_' + str(ast_id)
fname_json = fname_prefix + '.json'
fname_map = fname_prefix + '.map'
fname_code = fname_prefix + '.code'
# output and correct (grab this from other database)
dbentry = {}
dbentry['output'],dbentry['correct'],count = grabOutput(dbread, idlist)
dbentry['jsonstr'] = loadTextFile(fname_json)
dbentry['mapstr'] = loadTextFile(fname_map)
dbentry['codestr'] = loadTextFile(fname_code)
dbentry['hw_id'] = hw_id
dbentry['part_id'] = part_id
dbentry['idlist'] = idlist
dbentry['ast_id'] = ast_id
# write to db and log entry
Inserter.add(dbentry)
if ast_id % 20 == 0:
logging.debug(report(dbentry,ast_id,len(submissionIDs)))
# delete the folder
# os.system('rm -rf ' + dirname)
logHwPart(hw_id,part_id,coarselogfilename)
# close database connections
closedb(dbread)
if len(sys.argv) == 2:
argstr = sys.argv[1]
if len(sys.argv) != 2 or (argstr != '-test' and argstr != '-full'):
print('Usage: python populateDB.py [-test, -full]')
sys.exit(1)
print(argstr)
testoption = False
if argstr == '-test':
testoption = True
dbwrite['Name'] = 'codewebdb_test'
run(testoption)
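# Illustrative usage sketch (an assumption, not part of the original script):
# MultiInserter buffers entries and issues one executemany() per maxQueueSize
# rows, with __del__ flushing whatever remains. The field values below are
# placeholders. Defined as a function so nothing extra runs when this script
# executes.
def _example_multi_insert():
    inserter = MultiInserter(dbwrite, 2)
    entry = {'hw_id': 1, 'part_id': 1, 'ast_id': 0, 'codestr': '',
             'idlist': [], 'jsonstr': '{}', 'mapstr': '', 'output': '',
             'correct': 1}
    inserter.add(entry)  # queued only
    inserter.add(entry)  # queue hits maxQueueSize, so flush() runs
    del inserter         # flushes any remainder and closes the connection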
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2020 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME)
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir,
get_platform)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'): # pragma: no cover
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'): # pragma: no cover
IMP_PREFIX = 'jy'
elif sys.platform == 'cli': # pragma: no cover
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
if sys.version_info[1] >= 10:
VER_SUFFIX = '%s_%s' % sys.version_info[:2] # PEP 641 (draft)
else:
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp').split('-')[0]
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
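# For illustration (an assumption, not an original comment): a hypothetical
# filename 'demo-1.0-1-cp39-cp39-manylinux1_x86_64.whl' is split by
# FILENAME_RE into nm='demo', vn='1.0', bn='1', py='cp39', bi='cp39' and
# ar='manylinux1_x86_64'; NAME_VERSION_RE matches only the bare
# 'name-version[-build]' form, e.g. 'demo-1.0-1'.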
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# if file_version < (1, 1):
# fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
# LEGACY_METADATA_FILENAME]
# else:
# fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
result = None
for fn in fns:
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
if result:
break
except KeyError:
pass
if not result:
raise ValueError('Invalid wheel, because metadata is '
'missing: looked in %s' % ', '.join(fns))
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
records = list(records) # make a copy, as mutated
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# sort the entries by archive path. Not needed by any spec, but it
# keeps the archive listing and RECORD tidier than they would otherwise
# be. Use the number of path segments to keep directory entries together,
# and keep the dist-info stuff at the end.
def sorter(t):
ap = t[0]
n = ap.count('/')
if '.dist-info' in ap:
n += 10000
return (n, ap)
archive_paths = sorted(archive_paths, key=sorter)
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def skip_entry(self, arcname):
"""
Determine whether an archive entry should be skipped when verifying
or installing.
"""
# The signature file won't be in RECORD,
# and we don't currently do anything with it.
# We also skip directories, as they won't be in RECORD
# either. See:
#
# https://github.com/pypa/wheel/issues/294
# https://github.com/pypa/wheel/issues/287
# https://github.com/pypa/wheel/pull/289
#
return arcname.endswith(('/', '/RECORD.jws'))
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
bytecode will try to use file-hash based invalidation (PEP-552) on
supported interpreter versions (CPython 3.7+).
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if self.skip_entry(u_arcname):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
# Issue #147: permission bits aren't preserved. Using
# zf.extract(zinfo, libdir) should have worked, but didn't,
# see https://www.thetopsites.net/article/53834422.shtml
# So ... manually preserve permission bits as given in zinfo
if os.name == 'posix':
# just set the normal permission bits
os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile,
hashed_invalidation=bc_hashed_invalidation)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' [%s]' % ','.join(v.flags)
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
'%s.%s' % sys.version_info[:2])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# See issue #115: some wheels have .. in their entries, but
# in the filename ... e.g. __main__..py ! So the check is
# updated to look for .. in the directory portions
p = u_arcname.split('/')
if '..' in p:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
if self.skip_entry(u_arcname):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = path.endswith(LEGACY_METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
def _get_glibc_version():
import platform
ver = platform.libc_ver()
result = []
if ver[0] == 'glibc':
for s in ver[1].split('.'):
result.append(int(s) if s.isdigit() else 0)
result = tuple(result)
return result
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# manylinux
if abi != 'none' and sys.platform.startswith('linux'):
arch = arch.replace('linux_', '')
parts = _get_glibc_version()
if len(parts) == 2:
if parts >= (2, 5):
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux1_%s' % arch))
if parts >= (2, 12):
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux2010_%s' % arch))
if parts >= (2, 17):
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux2014_%s' % arch))
result.append((''.join((IMP_PREFIX, versions[0])), abi,
'manylinux_%s_%s_%s' % (parts[0], parts[1],
arch)))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
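# Minimal usage sketch (an assumption, not part of distlib): parse a
# hypothetical wheel filename and test it against this interpreter's
# COMPATIBLE_TAGS via is_compatible(). Never called from this module.
def _example_wheel_compat():  # pragma: no cover
    w = Wheel('demo-1.0-py3-none-any.whl')
    # name, version and (pyver, abi, arch) tags come from FILENAME_RE
    logger.debug('%s %s %s', w.name, w.version, sorted(w.tags))
    return is_compatible(w)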
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine import function
from heat.engine import rsrc_defn
from heat.tests.autoscaling import inline_templates
from heat.tests import common
from heat.tests import utils
class TestAutoScalingGroupValidation(common.HeatTestCase):
def setUp(self):
super(TestAutoScalingGroupValidation, self).setUp()
self.parsed = template_format.parse(inline_templates.as_heat_template)
def test_invalid_min_size(self):
self.parsed['resources']['my-group']['properties']['min_size'] = -1
stack = utils.parse_stack(self.parsed)
self.assertRaises(exception.StackValidationFailed,
stack['my-group'].validate)
def test_invalid_max_size(self):
self.parsed['resources']['my-group']['properties']['max_size'] = -1
stack = utils.parse_stack(self.parsed)
self.assertRaises(exception.StackValidationFailed,
stack['my-group'].validate)
class TestScalingGroupTags(common.HeatTestCase):
def setUp(self):
super(TestScalingGroupTags, self).setUp()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
def test_tags_default(self):
expected = [{'Key': 'metering.groupname',
'Value': u'my-group'},
{'Key': 'metering.AutoScalingGroupName',
'Value': u'my-group'}]
self.assertEqual(expected, self.group._tags())
def test_tags_with_extra(self):
self.group.properties.data['Tags'] = [
{'Key': 'fee', 'Value': 'foo'}]
expected = [{'Key': 'metering.groupname',
'Value': u'my-group'},
{'Key': 'metering.AutoScalingGroupName',
'Value': u'my-group'}]
self.assertEqual(expected, self.group._tags())
def test_tags_with_metering(self):
self.group.properties.data['Tags'] = [
{'Key': 'metering.fee', 'Value': 'foo'}]
expected = [{'Key': 'metering.groupname', 'Value': 'my-group'},
{'Key': 'metering.AutoScalingGroupName',
'Value': u'my-group'}]
self.assertEqual(expected, self.group._tags())
class TestInitialGroupSize(common.HeatTestCase):
scenarios = [
('000', dict(mins=0, maxs=0, desired=0, expected=0)),
('040', dict(mins=0, maxs=4, desired=0, expected=0)),
('253', dict(mins=2, maxs=5, desired=3, expected=3)),
('14n', dict(mins=1, maxs=4, desired=None, expected=1)),
]
def setUp(self):
super(TestInitialGroupSize, self).setUp()
def test_initial_size(self):
t = template_format.parse(inline_templates.as_heat_template)
properties = t['resources']['my-group']['properties']
properties['min_size'] = self.mins
properties['max_size'] = self.maxs
properties['desired_capacity'] = self.desired
stack = utils.parse_stack(t, params=inline_templates.as_params)
group = stack['my-group']
with mock.patch.object(group, '_create_template') as mock_cre_temp:
group.child_template()
mock_cre_temp.assert_called_once_with(self.expected)
class TestGroupAdjust(common.HeatTestCase):
def setUp(self):
super(TestGroupAdjust, self).setUp()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
self.stub_ImageConstraint_validate()
self.stub_FlavorConstraint_validate()
self.stub_SnapshotConstraint_validate()
self.assertIsNone(self.group.validate())
def test_scaling_policy_cooldown_toosoon(self):
"""If _cooldown_inprogress() returns True don't progress."""
dont_call = self.patchobject(grouputils, 'get_size')
with mock.patch.object(self.group, '_cooldown_inprogress',
return_value=True):
self.group.adjust(1)
self.assertEqual([], dont_call.call_args_list)
def test_scaling_policy_cooldown_toosoon_with_signal(self):
with mock.patch.object(self.group, '_cooldown_inprogress',
return_value=True):
self.assertRaises(exception.NoActionRequired, self.group.adjust, 1,
signal=True)
def test_scaling_same_capacity(self):
"""Alway resize even if the capacity is the same."""
self.patchobject(grouputils, 'get_size', return_value=3)
resize = self.patchobject(self.group, 'resize')
cd_stamp = self.patchobject(self.group, '_cooldown_timestamp')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
self.group.adjust(3, adjustment_type='ExactCapacity')
expected_notifies = [
mock.call(
capacity=3, suffix='start',
adjustment_type='ExactCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=3,
stack=self.group.stack),
mock.call(
capacity=3, suffix='end',
adjustment_type='ExactCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=3,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
cd_stamp.assert_called_once_with('ExactCapacity : 3')
def test_scale_up_min_adjustment(self):
self.patchobject(grouputils, 'get_size', return_value=1)
resize = self.patchobject(self.group, 'resize')
cd_stamp = self.patchobject(self.group, '_cooldown_timestamp')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
self.group.adjust(33, adjustment_type='PercentChangeInCapacity',
min_adjustment_step=2)
expected_notifies = [
mock.call(
capacity=1, suffix='start',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=33,
stack=self.group.stack),
mock.call(
capacity=3, suffix='end',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=33,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
cd_stamp.assert_called_once_with('PercentChangeInCapacity : 33')
def test_scale_down_min_adjustment(self):
self.patchobject(grouputils, 'get_size', return_value=3)
resize = self.patchobject(self.group, 'resize')
cd_stamp = self.patchobject(self.group, '_cooldown_timestamp')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
self.group.adjust(-33, adjustment_type='PercentChangeInCapacity',
min_adjustment_step=2)
expected_notifies = [
mock.call(
capacity=3, suffix='start',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=-33,
stack=self.group.stack),
mock.call(
capacity=1, suffix='end',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=-33,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
cd_stamp.assert_called_once_with('PercentChangeInCapacity : -33')
def test_scaling_policy_cooldown_ok(self):
self.patchobject(grouputils, 'get_size', return_value=0)
resize = self.patchobject(self.group, 'resize')
cd_stamp = self.patchobject(self.group, '_cooldown_timestamp')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
self.group.adjust(1)
expected_notifies = [
mock.call(
capacity=0, suffix='start', adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=1,
stack=self.group.stack),
mock.call(
capacity=1, suffix='end',
adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=1,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
cd_stamp.assert_called_once_with('ChangeInCapacity : 1')
grouputils.get_size.assert_called_once_with(self.group)
def test_scaling_policy_resize_fail(self):
self.patchobject(grouputils, 'get_size', return_value=0)
self.patchobject(self.group, 'resize',
side_effect=ValueError('test error'))
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
self.patchobject(self.group, '_cooldown_timestamp')
self.assertRaises(ValueError, self.group.adjust, 1)
expected_notifies = [
mock.call(
capacity=0, suffix='start',
adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=1,
stack=self.group.stack),
mock.call(
capacity=0, suffix='error',
adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'test error',
adjustment=1,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
grouputils.get_size.assert_called_with(self.group)
def test_notification_send_if_resize_failed(self):
"""If resize failed, the capacity of group might have been changed"""
self.patchobject(grouputils, 'get_size', side_effect=[3, 4])
self.patchobject(self.group, 'resize',
side_effect=ValueError('test error'))
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
self.patchobject(self.group, '_cooldown_timestamp')
self.assertRaises(ValueError, self.group.adjust,
5, adjustment_type='ExactCapacity')
expected_notifies = [
mock.call(
capacity=3, suffix='start',
adjustment_type='ExactCapacity',
groupname='my-group',
message='Start resizing the group my-group',
adjustment=5,
stack=self.group.stack),
mock.call(
capacity=4, suffix='error',
adjustment_type='ExactCapacity',
groupname='my-group',
message=u'test error',
adjustment=5,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
self.group.resize.assert_called_once_with(5)
grouputils.get_size.assert_has_calls([mock.call(self.group),
mock.call(self.group)])
class TestGroupCrud(common.HeatTestCase):
def setUp(self):
super(TestGroupCrud, self).setUp()
self.stub_ImageConstraint_validate()
self.stub_FlavorConstraint_validate()
self.stub_SnapshotConstraint_validate()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
self.assertIsNone(self.group.validate())
def test_handle_create(self):
self.group.create_with_template = mock.Mock(return_value=None)
self.group.child_template = mock.Mock(return_value='{}')
self.group.handle_create()
self.group.child_template.assert_called_once_with()
self.group.create_with_template.assert_called_once_with('{}')
def test_handle_update_desired_cap(self):
self.group._try_rolling_update = mock.Mock(return_value=None)
self.group.adjust = mock.Mock(return_value=None)
props = {'desired_capacity': 4}
defn = rsrc_defn.ResourceDefinition(
'nopayload',
'OS::Heat::AutoScalingGroup',
props)
self.group.handle_update(defn, None, props)
self.group.adjust.assert_called_once_with(
4, adjustment_type='ExactCapacity')
self.group._try_rolling_update.assert_called_once_with(props)
def test_handle_update_desired_nocap(self):
self.group._try_rolling_update = mock.Mock(return_value=None)
self.group.adjust = mock.Mock(return_value=None)
get_size = self.patchobject(grouputils, 'get_size')
get_size.return_value = 6
props = {'Tags': []}
defn = rsrc_defn.ResourceDefinition(
'nopayload',
'OS::Heat::AutoScalingGroup',
props)
self.group.handle_update(defn, None, props)
self.group.adjust.assert_called_once_with(
6, adjustment_type='ExactCapacity')
self.group._try_rolling_update.assert_called_once_with(props)
def test_update_in_failed(self):
self.group.state_set('CREATE', 'FAILED')
# to update the failed asg
self.group.adjust = mock.Mock(return_value=None)
new_defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::AutoScalingGroup',
{'AvailabilityZones': ['nova'],
'LaunchConfigurationName': 'config',
'max_size': 5,
'min_size': 1,
'desired_capacity': 2,
'resource':
{'type': 'ResourceWithPropsAndAttrs',
'properties': {
'Foo': 'hello'}}})
self.group.handle_update(new_defn, None, None)
self.group.adjust.assert_called_once_with(
2, adjustment_type='ExactCapacity')
class HeatScalingGroupAttrTest(common.HeatTestCase):
def setUp(self):
super(HeatScalingGroupAttrTest, self).setUp()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
self.assertIsNone(self.group.validate())
def test_no_instance_list(self):
"""Tests inheritance of InstanceList attribute.
The InstanceList attribute is not inherited from
AutoScalingResourceGroup's superclasses.
"""
self.assertRaises(exception.InvalidTemplateAttribute,
self.group.FnGetAtt, 'InstanceList')
def test_output_attribute_list(self):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
output = []
for ip_ex in six.moves.range(1, 4):
inst = mock.Mock()
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output.append('2.1.3.%d' % ip_ex)
members.append(inst)
mock_members.return_value = members
self.assertEqual(output, self.group.FnGetAtt('outputs_list', 'Bar'))
def test_output_attribute_dict(self):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
output = {}
for ip_ex in six.moves.range(1, 4):
inst = mock.Mock()
inst.name = str(ip_ex)
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output[str(ip_ex)] = '2.1.3.%d' % ip_ex
members.append(inst)
mock_members.return_value = members
self.assertEqual(output,
self.group.FnGetAtt('outputs', 'Bar'))
def test_attribute_current_size(self):
mock_instances = self.patchobject(grouputils, 'get_size')
mock_instances.return_value = 3
self.assertEqual(3, self.group.FnGetAtt('current_size'))
def test_attribute_current_size_with_path(self):
mock_instances = self.patchobject(grouputils, 'get_size')
mock_instances.return_value = 4
self.assertEqual(4, self.group.FnGetAtt('current_size', 'name'))
def test_index_dotted_attribute(self):
mock_members = self.patchobject(grouputils, 'get_members')
self.group.nested = mock.Mock()
members = []
output = []
for ip_ex in six.moves.range(0, 2):
inst = mock.Mock()
inst.name = str(ip_ex)
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output.append('2.1.3.%d' % ip_ex)
members.append(inst)
mock_members.return_value = members
self.assertEqual(output[0], self.group.FnGetAtt('resource.0', 'Bar'))
self.assertEqual(output[1], self.group.FnGetAtt('resource.1.Bar'))
self.assertRaises(exception.InvalidTemplateAttribute,
self.group.FnGetAtt, 'resource.2')
def asg_tmpl_with_bad_updt_policy():
t = template_format.parse(inline_templates.as_heat_template)
agp = t['resources']['my-group']['properties']
agp['rolling_updates'] = {"foo": {}}
return json.dumps(t)
def asg_tmpl_with_default_updt_policy():
t = template_format.parse(inline_templates.as_heat_template)
return json.dumps(t)
def asg_tmpl_with_updt_policy(props=None):
t = template_format.parse(inline_templates.as_heat_template)
agp = t['resources']['my-group']['properties']
agp['rolling_updates'] = {
"min_in_service": 1,
"max_batch_size": 2,
"pause_time": 1
}
if props is not None:
agp.update(props)
return json.dumps(t)
class RollingUpdatePolicyTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyTest, self).setUp()
self.stub_keystoneclient(username='test_stack.CfnLBUser')
def test_parse_without_update_policy(self):
tmpl = template_format.parse(inline_templates.as_heat_template)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['my-group']
default_policy = {
'min_in_service': 0,
'pause_time': 0,
'max_batch_size': 1
}
self.assertEqual(default_policy, grp.properties['rolling_updates'])
def test_parse_with_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_updt_policy())
stack = utils.parse_stack(tmpl)
stack.validate()
tmpl_grp = tmpl['resources']['my-group']
tmpl_policy = tmpl_grp['properties']['rolling_updates']
tmpl_batch_sz = int(tmpl_policy['max_batch_size'])
policy = stack['my-group'].properties['rolling_updates']
self.assertTrue(policy)
        self.assertEqual(3, len(policy))
self.assertEqual(1, int(policy['min_in_service']))
self.assertEqual(tmpl_batch_sz, int(policy['max_batch_size']))
self.assertEqual(1, policy['pause_time'])
def test_parse_with_default_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy())
stack = utils.parse_stack(tmpl)
stack.validate()
policy = stack['my-group'].properties['rolling_updates']
self.assertTrue(policy)
self.assertEqual(3, len(policy))
self.assertEqual(0, int(policy['min_in_service']))
self.assertEqual(1, int(policy['max_batch_size']))
self.assertEqual(0, policy['pause_time'])
def test_parse_with_bad_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_bad_updt_policy())
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("foo", six.text_type(error))
def test_parse_with_bad_pausetime_in_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy())
group = tmpl['resources']['my-group']
group['properties']['rolling_updates'] = {'pause_time': 'a-string'}
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("could not convert string to float",
six.text_type(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyDiffTest, self).setUp()
self.stub_keystoneclient(username='test_stack.CfnLBUser')
def validate_update_policy_diff(self, current, updated):
# load current stack
current_tmpl = template_format.parse(current)
current_stack = utils.parse_stack(current_tmpl)
        # get the json snippet for the current autoscaling group resource
current_grp = current_stack['my-group']
current_snippets = dict((n, r.parsed_template())
for n, r in current_stack.items())
current_grp_json = current_snippets[current_grp.name]
# load the updated stack
updated_tmpl = template_format.parse(updated)
updated_stack = utils.parse_stack(updated_tmpl)
        # get the updated json snippet for the autoscaling group resource in
        # the context of the current stack
updated_grp = updated_stack['my-group']
updated_grp_json = function.resolve(updated_grp.t)
# identify the template difference
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
updated_policy = (updated_grp.properties['rolling_updates']
if 'rolling_updates' in updated_grp.properties.data
else None)
self.assertEqual(updated_policy,
tmpl_diff['Properties'].get('rolling_updates'))
# test application of the new update policy in handle_update
update_snippet = rsrc_defn.ResourceDefinition(
current_grp.name,
current_grp.type(),
properties=updated_grp.t['Properties'])
current_grp._try_rolling_update = mock.MagicMock()
current_grp.adjust = mock.MagicMock()
current_grp.handle_update(update_snippet, tmpl_diff, None)
if updated_policy is None:
self.assertIsNone(
current_grp.properties.data.get('rolling_updates'))
else:
self.assertEqual(updated_policy,
current_grp.properties.data['rolling_updates'])
def test_update_policy_added(self):
self.validate_update_policy_diff(inline_templates.as_heat_template,
asg_tmpl_with_updt_policy())
def test_update_policy_updated(self):
extra_props = {'rolling_updates': {
'min_in_service': 2,
'max_batch_size': 4,
'pause_time': 30}}
self.validate_update_policy_diff(
asg_tmpl_with_updt_policy(),
asg_tmpl_with_updt_policy(props=extra_props))
def test_update_policy_removed(self):
self.validate_update_policy_diff(asg_tmpl_with_updt_policy(),
inline_templates.as_heat_template)
class IncorrectUpdatePolicyTest(common.HeatTestCase):
def setUp(self):
super(IncorrectUpdatePolicyTest, self).setUp()
self.stub_keystoneclient(username='test_stack.CfnLBUser')
def test_with_update_policy_aws(self):
t = template_format.parse(inline_templates.as_heat_template)
ag = t['resources']['my-group']
ag["update_policy"] = {"AutoScalingRollingUpdate": {
"MinInstancesInService": "1",
"MaxBatchSize": "2",
"PauseTime": "PT1S"
}}
tmpl = template_format.parse(json.dumps(t))
stack = utils.parse_stack(tmpl)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Unknown Property AutoScalingRollingUpdate',
six.text_type(exc))
def test_with_update_policy_inst_group(self):
t = template_format.parse(inline_templates.as_heat_template)
ag = t['resources']['my-group']
ag["update_policy"] = {"RollingUpdate": {
"MinInstancesInService": "1",
"MaxBatchSize": "2",
"PauseTime": "PT1S"
}}
tmpl = template_format.parse(json.dumps(t))
stack = utils.parse_stack(tmpl)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Unknown Property RollingUpdate', six.text_type(exc))
|
|
"""The instruments module contains functions to open and close VISA, NI-DAQ or COM instruments.
"""
import traceback, importlib, pkgutil, os, sys, time
import pyslave.drivers
class InstrumentError(Exception):
pass
# Get VISA resource manager
try:
import pyvisa as visa
from pyvisa import VisaIOError
# VISA resource manager
class __ResourceManager__(visa.ResourceManager):
def __update__(self):
self.__list_resources_cached__ = self.list_resources()
self.time = time.time()
def check_if_exists(self, query):
"""Check if an instrument is connected to the VISA interface.
        The list of connected instruments is updated every 60 s.
"""
if time.time()-self.time > 60:
self.__update__()
return query in self.__list_resources_cached__
__visa_rm__ = __ResourceManager__()
__visa_rm__.__update__()
except:
__visa_rm__ = None
# Get NIDAQ module
try:
import PyDAQmx as __ni__
import ctypes
except:
__ni__ = None
# Get serial (COM) module
try:
import serial as __com__
except:
__com__ = None
# Parse drivers directory to build driver list
def __drivers__():
drivers = {}
loader = pkgutil.get_loader('pyslave.drivers')
for sub_module in pkgutil.iter_modules([os.path.dirname(loader.get_filename())]):
try:
importlib.invalidate_caches()
m = importlib.import_module( '.{0}'.format(sub_module.name), 'pyslave.drivers')
importlib.reload(m)
drivers.update(m.__drivers__)
except:
pass
return drivers
# Open the instrument and set missing attributes
def __open__(address, driver, resource):
app = driver(address)
app.__resource__ = resource
app.__driver_name__ = driver.__name__
app.__driver_module__ = driver.__module__
if not hasattr(app, '__inst_id__'):
app.__inst_id__ = 'Unknown instrument'
if not hasattr(app, '__inst_type__'):
app.__inst_type__ = 'instr'
if not hasattr(app,'__address__'):
if resource in ('VISA','NIDAQ','COM'):
app.__address__ = address
else:
app.__address__ = ''
return app
def openVISA(address, driver=None, verbose=True):
"""Open the instrument with the specified VISA address and python driver.
The address must be a valid VISA resource or alias.
If driver is None, the instrument id is queried and a corresponding driver
is searched. If the instrument does not answer (e.g. Yoko) or a matching
driver is not found, the generic VISA instrument driver is selected.
The function returns an instance of the driver class.
"""
if __visa_rm__ is None:
raise InstrumentError('PyVISA module not loaded')
# Check if valid VISA identifier
info = __visa_rm__.resource_info(address)
address = info.resource_name
if not __visa_rm__.check_if_exists(address):
raise InstrumentError('{0} is not an available VISA resource on the system. Use listVISA to update the list if the instrument has just been connected.'.format(address))
# Automatic driver selection
if driver is None:
app = __visa_rm__.open_resource(address)
try:
app.clear()
id = app.query('*IDN?')
id = id.split(',')[:2]
id = str(' '.join(id)).strip()
except:
if verbose:
traceback.print_exc(file=sys.stdout)
print("""Identification of {0} failed, using generic VISA instrument.
If the instrument is a Yoko, set the driver to 'yokogawa.yokogawa7651.yokogawa7651'.""".format(address))
id = None
finally:
app.close()
if id is not None:
known_devices = __drivers__()
if id in known_devices:
driver = known_devices[id]
else:
print('No driver found for instrument {0}, using generic VISA instrument.'.format(id))
# Import driver
if driver is not None:
try:
pkg_name, driver_name = driver.rsplit('.',1)
importlib.invalidate_caches()
m = importlib.import_module( '.{0}'.format(pkg_name), 'pyslave.drivers')
importlib.reload(m)
driver = getattr(m, driver_name)
except:
if verbose:
traceback.print_exc(file=sys.stdout)
print('Error while importing instrument driver {0}, using generic VISA instrument.'.format(driver))
driver = __visa_rm__.open_resource
else:
driver = __visa_rm__.open_resource
return __open__(address, driver, 'VISA')
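# Illustrative usage sketch (the module path and VISA addresses are
# placeholders for whatever is present on a given setup; not executed here):
#
#     from pyslave.instruments import openVISA
#     dmm = openVISA('GPIB0::22::INSTR')      # driver picked from the *IDN? reply
#     print(dmm.__inst_id__, dmm.__driver_name__, dmm.__address__)
#     # or force a specific driver class under pyslave.drivers:
#     yoko = openVISA('GPIB0::7::INSTR',
#                     driver='yokogawa.yokogawa7651.yokogawa7651')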
def resetVISA():
    """Reset the VISA connection by closing the current resource manager
    and creating a fresh one."""
    global __visa_rm__
    try:
        __visa_rm__.close()
    except:
        pass
    __visa_rm__ = __ResourceManager__()
    __visa_rm__.__update__()
def openNIDAQ(devname, driver=None, verbose=True):
"""Open the NI-DAQ device with the specified name and python driver.
If driver is None, the device id is queried and a matching driver
is looked for. If no driver is found for the corresponding id,
an error is raised.
The function returns an instance of the driver class.
"""
if __ni__ is None:
raise InstrumentError('PyDAQmx module not loaded.')
if driver is None:
        # create_string_buffer needs a size (or bytes) and buffer.value is
        # bytes, so decode it before looking up the driver table
        buffer = ctypes.create_string_buffer(1024)
        __ni__.DAQmxGetDeviceAttribute(devname, __ni__.DAQmx_Dev_ProductType, buffer, 1024)
        id = buffer.value.decode().strip()
known_devices = __drivers__()
if id in known_devices:
pkg_name, driver_name = known_devices[id].rsplit('.',1)
importlib.invalidate_caches()
m = importlib.import_module( '.{0}'.format(pkg_name), 'pyslave.drivers')
importlib.reload(m)
driver = getattr(m, driver_name)
else:
raise InstrumentError('No driver for NI-DAQ device {0}.'.format(id))
return __open__(devname, driver,'NIDAQ')
def openCOM(com, driver=None, verbose=True):
"""Open the COM device with the specified COM port and python driver.
If driver is None, the generic com interface driver is used.
The function returns an instance of the driver class.
"""
if __com__ is None:
raise InstrumentError('PySerial module not loaded.')
if driver is not None:
try:
pkg_name, driver_name = driver.rsplit('.',1)
importlib.invalidate_caches()
m = importlib.import_module( '.{0}'.format(pkg_name), 'pyslave.drivers')
importlib.reload(m)
driver = getattr(m, driver_name)
except:
if verbose:
traceback.print_exc(file=sys.stdout)
print('Error while importing instrument driver {0}, using generic COM driver.'.format(driver))
driver = __com__.Serial
else:
driver = __com__.Serial
return __open__(com, driver,'COM')
def openOther(arg, driver, verbose=True):
"""Open specific types of instruments.
The function returns an instance of the driver. The first
argument is passed to the driver."""
pkg_name, driver_name = driver.rsplit('.',1)
importlib.invalidate_caches()
m = importlib.import_module( '.{0}'.format(pkg_name), 'pyslave.drivers')
importlib.reload(m)
driver = getattr(m, driver_name)
return __open__(arg, driver, 'Other')
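# Driver strings are resolved relative to the pyslave.drivers package as
# '<sub-module path>.<class name>' (see the rsplit/import_module calls above).
# Hypothetical sketch for the serial and generic entry points; the COM port
# and 'mydriver.MyBox' path are made up for illustration:
#
#     from pyslave.instruments import openCOM, openOther
#     gauge = openCOM('COM3')                           # generic serial.Serial driver
#     box = openOther('192.168.0.5', 'mydriver.MyBox')  # hypothetical driver path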
|
|
"""(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
import json
import re
from time import sleep
from django.test import tag
from django.urls import reverse
from rest_framework.test import APITestCase
from api import rancher
from api.redis import flush_all
from api.tests import KERMIT_SCIPER, KERMIT_UNIT
from config.settings.base import get_config
class ViewsTestCase(APITestCase):
def setUp(self):
flush_all()
def tearDown(self):
flush_all()
def test_get_unit_list(self):
response = self.client.get(
reverse('unit-list'),
data={},
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "Units not found")
def test_get_user_list(self):
response = self.client.get(
reverse('user-list'),
data={},
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "Users not found")
def test_get_user_detail(self):
response = self.client.get(
reverse(viewname='user-detail', args={'user_id': "133134"}),
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "User not found")
def test_get_unit_detail(self):
response = self.client.get(
reverse(viewname='unit-detail', args={'unit_id': KERMIT_UNIT}),
format='json'
)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content, "Unit not found")
def test_post_apikeys(self):
"""
Test the POST method of KeyView
"""
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(content["access_key"]), 20)
self.assertEqual(len(content["secret_key"]), 40)
self.assertEqual(
response['content-type'],
'application/json'
)
def test_get_apikeys(self):
""" Test the GET method of KeyView """
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
response = self.client.get(
reverse('apikey-list'),
data={"access_key": content["access_key"],
"secret_key": content["secret_key"]},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(content), 1)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
@tag('rancher')
def test_post_schemas(self):
""" Test the POST method of Schemas """
# create API Keys
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
access_key = content["access_key"]
secret_key = content["secret_key"]
# create schemas
response = self.client.post(
reverse('schema-list'),
data={"access_key": access_key,
"secret_key": secret_key},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
"mysql://aa2ea71b:-CxMbtSVdPcY88MH3Vo7@mysql-78bc59f0.db.rsaas.epfl.ch:12068/98c321cb"
self.assertIsNotNone(re.match('^mysql://\w+:[-\+\w]+@[-\.\w]+:\d+/.+$', content['connection_string']))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
# Get schema
response = self.client.get(
reverse(
viewname='schema-detail',
args={content["schema_id"]},
),
data={"access_key": access_key, "secret_key": secret_key},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
        self.assertIsNotNone(re.match(r'^mysql://\w+@[-\.\w]+:\d+/.+$', content['connection_string']))
sleep(10)
# Patch schema
response = self.client.patch(
reverse(
viewname='schema-detail',
args={content["schema_id"]},
),
data={"access_key": access_key, "secret_key": secret_key, "unit_id": "13029"},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
self.assertEqual(content["unit_id"], "13029")
        # Clean stacks
conn = rancher.Rancher()
conn.clean_stacks(KERMIT_SCIPER)
@tag('rancher')
def test_get_schemas(self):
""" Test the GET method of schemas"""
# create an API key
response = self.client.post(
reverse('apikey-list'),
data={"username": get_config('TEST_USERNAME'), "password": get_config('TEST_CORRECT_PWD')},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
self.client.post(
reverse('schema-list'),
data={"access_key": content["access_key"],
"secret_key": content["secret_key"]},
format='json'
)
response = self.client.get(
reverse('schema-list'),
data={"access_key": content["access_key"],
"secret_key": content["secret_key"]},
format='json'
)
content = json.loads(response.content.decode('utf-8'))
# we get a list of dicts with 1 element
self.assertEqual(len(content), 1)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['content-type'],
'application/json'
)
# Clean stacks
conn = rancher.Rancher()
conn.clean_stacks(KERMIT_SCIPER)
def test_get_version(self):
""" Test the GET method of Version """
response = self.client.get(
reverse('version-detail'),
format='json'
)
content = json.loads(response.content.decode('utf-8'))
        self.assertIsNotNone(re.match(r'^\d+\.\d+\.\d+$', content))
|
|
"""The tests for the Template lock platform."""
import logging
from homeassistant.core import callback
from homeassistant import setup
from homeassistant.components import lock
from homeassistant.const import STATE_ON, STATE_OFF
from tests.common import (get_test_home_assistant,
assert_setup_component)
_LOGGER = logging.getLogger(__name__)
class TestTemplateLock:
"""Test the Template lock."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state(self):
"""Test template."""
with assert_setup_component(1, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'name': 'Test template lock',
'value_template':
"{{ states.switch.test_state.state }}",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('lock.test_template_lock')
assert state.state == lock.STATE_LOCKED
self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('lock.test_template_lock')
assert state.state == lock.STATE_UNLOCKED
def test_template_state_boolean_on(self):
"""Test the setting of the state with boolean on."""
with assert_setup_component(1, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template':
"{{ 1 == 1 }}",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('lock.template_lock')
assert state.state == lock.STATE_LOCKED
def test_template_state_boolean_off(self):
"""Test the setting of the state with off."""
with assert_setup_component(1, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template':
"{{ 1 == 2 }}",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('lock.template_lock')
assert state.state == lock.STATE_UNLOCKED
def test_template_syntax_error(self):
"""Test templating syntax error."""
with assert_setup_component(0, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template':
"{% if rubbish %}",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_name_does_not_create(self):
"""Test invalid name."""
with assert_setup_component(0, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'switch': {
'platform': 'lock',
'name': '{{%}',
'value_template':
"{{ rubbish }",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_lock_does_not_create(self):
"""Test invalid lock."""
with assert_setup_component(0, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template': "Invalid"
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_template_does_not_create(self):
"""Test missing template."""
with assert_setup_component(0, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'not_value_template':
"{{ states.switch.test_state.state }}",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_no_template_match_all(self, caplog):
"""Test that we do not allow locks that match on all."""
with assert_setup_component(1, 'lock'):
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template': '{{ 1 + 1 }}',
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('lock.template_lock')
assert state.state == lock.STATE_UNLOCKED
assert ('Template lock Template Lock has no entity ids configured '
'to track nor were we able to extract the entities to track '
'from the value_template template. This entity will only '
'be able to be updated manually.') in caplog.text
self.hass.states.set('lock.template_lock', lock.STATE_LOCKED)
self.hass.block_till_done()
state = self.hass.states.get('lock.template_lock')
assert state.state == lock.STATE_LOCKED
def test_lock_action(self):
"""Test lock action."""
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template':
"{{ states.switch.test_state.state }}",
'lock': {
'service': 'test.automation'
},
'unlock': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('lock.template_lock')
assert state.state == lock.STATE_UNLOCKED
self.hass.services.call(lock.DOMAIN, lock.SERVICE_LOCK, {
lock.ATTR_ENTITY_ID: 'lock.template_lock'
})
self.hass.block_till_done()
assert len(self.calls) == 1
def test_unlock_action(self):
"""Test unlock action."""
assert setup.setup_component(self.hass, 'lock', {
'lock': {
'platform': 'template',
'value_template':
"{{ states.switch.test_state.state }}",
'lock': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'unlock': {
'service': 'test.automation'
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('lock.template_lock')
assert state.state == lock.STATE_LOCKED
self.hass.services.call(lock.DOMAIN, lock.SERVICE_UNLOCK, {
lock.ATTR_ENTITY_ID: 'lock.template_lock'
})
self.hass.block_till_done()
assert len(self.calls) == 1
|
|
"""Provides a variety of introspective-type support functions for
things like call tips and command auto completion."""
__author__ = "Patrick K. O'Brien <pobrien@orbtech.com>"
__cvsid__ = "$Id: introspect.py 39896 2006-06-29 22:24:00Z RD $"
__revision__ = "$Revision: 39896 $"[11:-2]
import cStringIO
import inspect
import sys
import tokenize
import types
import wx
def getAutoCompleteList(command='', locals=None, includeMagic=1,
includeSingle=1, includeDouble=1):
"""Return list of auto-completion options for command.
The list of options will be based on the locals namespace."""
attributes = []
# Get the proper chunk of code from the command.
root = getRoot(command, terminator='.')
try:
if locals is not None:
object = eval(root, locals)
else:
object = eval(root)
except:
pass
else:
attributes = getAttributeNames(object, includeMagic,
includeSingle, includeDouble)
return attributes
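# Illustrative call (Python 2, like the rest of this module); the returned
# names depend on the object the root evaluates to:
#
#     import os
#     getAutoCompleteList('os.', locals={'os': os})
#     # -> ['F_OK', 'O_APPEND', ...]  attribute names of the os module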
def getAttributeNames(object, includeMagic=1, includeSingle=1,
includeDouble=1):
"""Return list of unique attributes, including inherited, for object."""
attributes = []
dict = {}
if not hasattrAlwaysReturnsTrue(object):
# Add some attributes that don't always get picked up.
special_attrs = ['__bases__', '__class__', '__dict__', '__name__',
'func_closure', 'func_code', 'func_defaults',
'func_dict', 'func_doc', 'func_globals', 'func_name']
attributes += [attr for attr in special_attrs \
if hasattr(object, attr)]
if includeMagic:
try: attributes += object._getAttributeNames()
except: pass
# Get all attribute names.
str_type = str(type(object))
if str_type == "<type 'array'>":
attributes += dir(object)
else:
attrdict = getAllAttributeNames(object)
# Store the object's dir.
object_dir = dir(object)
for (obj_type_name, technique, count), attrlist in attrdict.items():
# This complexity is necessary to avoid accessing all the
# attributes of the object. This is very handy for objects
# whose attributes are lazily evaluated.
if type(object).__name__ == obj_type_name and technique == 'dir':
attributes += attrlist
else:
attributes += [attr for attr in attrlist \
if attr not in object_dir and hasattr(object, attr)]
# Remove duplicates from the attribute list.
for item in attributes:
dict[item] = None
attributes = dict.keys()
# new-style swig wrappings can result in non-string attributes
# e.g. ITK http://www.itk.org/
attributes = [attribute for attribute in attributes \
if type(attribute) == str]
attributes.sort(lambda x, y: cmp(x.upper(), y.upper()))
if not includeSingle:
attributes = filter(lambda item: item[0]!='_' \
or item[1:2]=='_', attributes)
if not includeDouble:
attributes = filter(lambda item: item[:2]!='__', attributes)
return attributes
def hasattrAlwaysReturnsTrue(object):
return hasattr(object, 'bogu5_123_aTTri8ute')
def getAllAttributeNames(object):
"""Return dict of all attributes, including inherited, for an object.
Recursively walk through a class and all base classes.
"""
attrdict = {} # (object, technique, count): [list of attributes]
# !!!
# Do Not use hasattr() as a test anywhere in this function,
# because it is unreliable with remote objects: xmlrpc, soap, etc.
# They always return true for hasattr().
# !!!
try:
# This could(?) fail if the type is poorly defined without
# even a name.
key = type(object).__name__
except:
key = 'anonymous'
# Wake up sleepy objects - a hack for ZODB objects in "ghost" state.
wakeupcall = dir(object)
del wakeupcall
# Get attributes available through the normal convention.
attributes = dir(object)
attrdict[(key, 'dir', len(attributes))] = attributes
# Get attributes from the object's dictionary, if it has one.
try:
attributes = object.__dict__.keys()
attributes.sort()
except: # Must catch all because object might have __getattr__.
pass
else:
attrdict[(key, '__dict__', len(attributes))] = attributes
# For a class instance, get the attributes for the class.
try:
klass = object.__class__
except: # Must catch all because object might have __getattr__.
pass
else:
if klass is object:
# Break a circular reference. This happens with extension
# classes.
pass
else:
attrdict.update(getAllAttributeNames(klass))
# Also get attributes from any and all parent classes.
try:
bases = object.__bases__
except: # Must catch all because object might have __getattr__.
pass
else:
if isinstance(bases, types.TupleType):
for base in bases:
if type(base) is types.TypeType:
# Break a circular reference. Happens in Python 2.2.
pass
else:
attrdict.update(getAllAttributeNames(base))
return attrdict
def getCallTip(command='', locals=None):
"""For a command, return a tuple of object name, argspec, tip text.
The call tip information will be based on the locals namespace."""
calltip = ('', '', '') # object name, argspec, tip text.
# Get the proper chunk of code from the command.
root = getRoot(command, terminator='(')
try:
if locals is not None:
object = eval(root, locals)
else:
object = eval(root)
except:
return calltip
name = ''
object, dropSelf = getBaseObject(object)
try:
name = object.__name__
except AttributeError:
pass
tip1 = ''
argspec = ''
if inspect.isbuiltin(object):
# Builtin functions don't have an argspec that we can get.
pass
elif inspect.isfunction(object):
# tip1 is a string like: "getCallTip(command='', locals=None)"
argspec = apply(inspect.formatargspec, inspect.getargspec(object))
if dropSelf:
# The first parameter to a method is a reference to an
# instance, usually coded as "self", and is usually passed
# automatically by Python; therefore we want to drop it.
temp = argspec.split(',')
if len(temp) == 1: # No other arguments.
argspec = '()'
elif temp[0][:2] == '(*': # first param is like *args, not self
pass
else: # Drop the first argument.
argspec = '(' + ','.join(temp[1:]).lstrip()
tip1 = name + argspec
doc = ''
if callable(object):
try:
doc = inspect.getdoc(object)
except:
pass
if doc:
# tip2 is the first separated line of the docstring, like:
# "Return call tip text for a command."
# tip3 is the rest of the docstring, like:
# "The call tip information will be based on ... <snip>
firstline = doc.split('\n')[0].lstrip()
if tip1 == firstline or firstline[:len(name)+1] == name+'(':
tip1 = ''
else:
tip1 += '\n\n'
docpieces = doc.split('\n\n')
tip2 = docpieces[0]
tip3 = '\n\n'.join(docpieces[1:])
tip = '%s%s\n\n%s' % (tip1, tip2, tip3)
else:
tip = tip1
calltip = (name, argspec[1:-1], tip.strip())
return calltip
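# Illustrative sketch of the tuple returned by getCallTip; the exact tip text
# depends on the target's signature and docstring:
#
#     name, argspec, tip = getCallTip('getRoot(', locals=locals())
#     # name    -> 'getRoot'
#     # argspec -> "command, terminator=None"
#     # tip     -> the signature line followed by the docstring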
def getRoot(command, terminator=None):
"""Return the rightmost root portion of an arbitrary Python command.
Return only the root portion that can be eval()'d without side
effects. The command would normally terminate with a '(' or
'.'. The terminator and anything after the terminator will be
dropped."""
command = command.split('\n')[-1]
if command.startswith(sys.ps2):
command = command[len(sys.ps2):]
command = command.lstrip()
command = rtrimTerminus(command, terminator)
tokens = getTokens(command)
if not tokens:
return ''
if tokens[-1][0] is tokenize.ENDMARKER:
# Remove the end marker.
del tokens[-1]
if not tokens:
return ''
if terminator == '.' and \
       (tokens[-1][1] != '.' or tokens[-1][0] is not tokenize.OP):
# Trap decimals in numbers, versus the dot operator.
return ''
else:
# Strip off the terminator.
if terminator and command.endswith(terminator):
size = 0 - len(terminator)
command = command[:size]
command = command.rstrip()
tokens = getTokens(command)
tokens.reverse()
line = ''
start = None
prefix = ''
laststring = '.'
emptyTypes = ('[]', '()', '{}')
for token in tokens:
tokentype = token[0]
tokenstring = token[1]
line = token[4]
if tokentype is tokenize.ENDMARKER:
continue
if tokentype in (tokenize.NAME, tokenize.STRING, tokenize.NUMBER) \
and laststring != '.':
# We've reached something that's not part of the root.
if prefix and line[token[3][1]] != ' ':
# If it doesn't have a space after it, remove the prefix.
prefix = ''
break
if tokentype in (tokenize.NAME, tokenize.STRING, tokenize.NUMBER) \
or (tokentype is tokenize.OP and tokenstring == '.'):
if prefix:
# The prefix isn't valid because it comes after a dot.
prefix = ''
break
else:
# start represents the last known good point in the line.
start = token[2][1]
elif len(tokenstring) == 1 and tokenstring in ('[({])}'):
            # Remember, we're working backwards.
# So prefix += tokenstring would be wrong.
if prefix in emptyTypes and tokenstring in ('[({'):
# We've already got an empty type identified so now we
# are in a nested situation and we can break out with
# what we've got.
break
else:
prefix = tokenstring + prefix
else:
# We've reached something that's not part of the root.
break
laststring = tokenstring
if start is None:
start = len(line)
root = line[start:]
if prefix in emptyTypes:
# Empty types are safe to be eval()'d and introspected.
root = prefix + root
return root
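# Intended behaviour of getRoot, shown as a sketch (values follow the token
# walk above; this module targets Python 2, so the examples assume its
# tokenizer):
#
#     getRoot('fred.bar.', terminator='.')       # -> 'fred.bar'
#     getRoot('x = spam.eggs(', terminator='(')  # -> 'spam.eggs'
#     getRoot('3.', terminator='.')              # -> ''  (dot is part of the float)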
def getTokens(command):
"""Return list of token tuples for command."""
# In case the command is unicode try encoding it
if type(command) == unicode:
try:
command = command.encode(wx.GetDefaultPyEncoding())
except UnicodeEncodeError:
pass # otherwise leave it alone
f = cStringIO.StringIO(command)
# tokens is a list of token tuples, each looking like:
# (type, string, (srow, scol), (erow, ecol), line)
tokens = []
# Can't use list comprehension:
# tokens = [token for token in tokenize.generate_tokens(f.readline)]
# because of need to append as much as possible before TokenError.
try:
## This code wasn't backward compatible with Python 2.1.3.
##
## for token in tokenize.generate_tokens(f.readline):
## tokens.append(token)
# This works with Python 2.1.3 (with nested_scopes).
def eater(*args):
tokens.append(args)
tokenize.tokenize_loop(f.readline, eater)
except tokenize.TokenError:
# This is due to a premature EOF, which we expect since we are
# feeding in fragments of Python code.
pass
return tokens
def rtrimTerminus(command, terminator=None):
"""Return command minus anything that follows the final terminator."""
if terminator:
pieces = command.split(terminator)
if len(pieces) > 1:
command = terminator.join(pieces[:-1]) + terminator
return command
def getBaseObject(object):
"""Return base object and dropSelf indicator for an object."""
if inspect.isbuiltin(object):
# Builtin functions don't have an argspec that we can get.
dropSelf = 0
elif inspect.ismethod(object):
# Get the function from the object otherwise
# inspect.getargspec() complains that the object isn't a
# Python function.
try:
if object.im_self is None:
# This is an unbound method so we do not drop self
# from the argspec, since an instance must be passed
# as the first arg.
dropSelf = 0
else:
dropSelf = 1
object = object.im_func
except AttributeError:
dropSelf = 0
elif inspect.isclass(object):
# Get the __init__ method function for the class.
constructor = getConstructor(object)
if constructor is not None:
object = constructor
dropSelf = 1
else:
dropSelf = 0
elif callable(object):
# Get the __call__ method instead.
try:
object = object.__call__.im_func
dropSelf = 1
except AttributeError:
dropSelf = 0
else:
dropSelf = 0
return object, dropSelf
def getConstructor(object):
"""Return constructor for class object, or None if there isn't one."""
try:
return object.__init__.im_func
except AttributeError:
for base in object.__bases__:
constructor = getConstructor(base)
if constructor is not None:
return constructor
return None
|
|
"""
Unit tests for the spade module.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import unittest
import os
import warnings
import neo
import numpy as np
from numpy.testing.utils import assert_array_almost_equal, assert_array_equal
import quantities as pq
import elephant.spike_train_generation as stg
import elephant.spade as spade
import elephant.conversion as conv
try:
import fim
HAVE_FIM = True
except ImportError:
HAVE_FIM = False
class SpadeTestCase(unittest.TestCase):
def setUp(self):
# Spade parameters
self.binsize = 1 * pq.ms
self.winlen = 10
self.n_subset = 10
self.n_surr = 10
self.alpha = 0.05
self.stability_thresh = [0.1, 0.1]
self.psr_param = [0, 0, 0]
self.min_occ = 4
self.min_spikes = 4
self.min_neu = 4
        # Test data parameters
# Number of patterns' occurrences
self.n_occ1 = 10
self.n_occ2 = 12
self.n_occ3 = 15
# Patterns lags
self.lags1 = [2]
self.lags2 = [1, 2]
self.lags3 = [1, 2, 3, 4, 5]
# Length of the spiketrain
self.t_stop = 3000
# Patterns times
self.patt1_times = neo.SpikeTrain(
np.arange(
0, 1000, 1000//self.n_occ1) *
pq.ms, t_stop=self.t_stop*pq.ms)
self.patt2_times = neo.SpikeTrain(
np.arange(
1000, 2000, 1000 // self.n_occ2) *
pq.ms, t_stop=self.t_stop * pq.ms)
self.patt3_times = neo.SpikeTrain(
np.arange(
2000, 3000, 1000 // self.n_occ3) *
pq.ms, t_stop=self.t_stop * pq.ms)
# Patterns
self.patt1 = [self.patt1_times] + [neo.SpikeTrain(
self.patt1_times.view(pq.Quantity)+l * pq.ms,
t_stop=self.t_stop*pq.ms) for l in self.lags1]
self.patt2 = [self.patt2_times] + [neo.SpikeTrain(
self.patt2_times.view(pq.Quantity)+l * pq.ms,
t_stop=self.t_stop*pq.ms) for l in self.lags2]
self.patt3 = [self.patt3_times] + [neo.SpikeTrain(
self.patt3_times.view(pq.Quantity)+l * pq.ms,
t_stop=self.t_stop*pq.ms) for l in self.lags3]
# Data
self.msip = self.patt1 + self.patt2 + self.patt3
# Expected results
self.n_spk1 = len(self.lags1) + 1
self.n_spk2 = len(self.lags2) + 1
self.n_spk3 = len(self.lags3) + 1
self.elements1 = list(range(self.n_spk1))
self.elements2 = list(range(self.n_spk2))
self.elements3 = list(range(self.n_spk3))
self.elements_msip = [
self.elements1, list(range(self.n_spk1, self.n_spk1 + self.n_spk2)),
list(range(self.n_spk1 + self.n_spk2, self.n_spk1 +
self.n_spk2 + self.n_spk3))]
self.occ1 = np.unique(conv.BinnedSpikeTrain(
self.patt1_times, self.binsize).spike_indices[0])
self.occ2 = np.unique(conv.BinnedSpikeTrain(
self.patt2_times, self.binsize).spike_indices[0])
self.occ3 = np.unique(conv.BinnedSpikeTrain(
self.patt3_times, self.binsize).spike_indices[0])
self.occ_msip = [
list(self.occ1), list(self.occ2), list(self.occ3)]
self.lags_msip = [self.lags1, self.lags2, self.lags3]
# Testing with multiple patterns input
def test_spade_msip(self):
output_msip = spade.spade(self.msip, self.binsize,
self.winlen,
n_subsets=self.n_subset,
stability_thresh=self.stability_thresh,
n_surr=self.n_surr, alpha=self.alpha,
psr_param=self.psr_param,
output_format='patterns')['patterns']
elements_msip = []
occ_msip = []
lags_msip = []
# collecting spade output
for out in output_msip:
elements_msip.append(out['neurons'])
occ_msip.append(list(out['times'].magnitude))
lags_msip.append(list(out['lags'].magnitude))
elements_msip = sorted(elements_msip, key=lambda d: len(d))
occ_msip = sorted(occ_msip, key=lambda d: len(d))
lags_msip = sorted(lags_msip, key=lambda d: len(d))
# check neurons in the patterns
assert_array_equal(elements_msip, self.elements_msip)
        # check the occurrence times of the patterns
assert_array_equal(occ_msip, self.occ_msip)
# check the lags
assert_array_equal(lags_msip, self.lags_msip)
    # test under a different configuration of parameters than the default one
def test_parameters(self):
# test min_spikes parameter
output_msip_min_spikes = spade.spade(self.msip, self.binsize,
self.winlen,
n_subsets=self.n_subset,
n_surr=self.n_surr, alpha=self.alpha,
min_spikes=self.min_spikes,
psr_param=self.psr_param,
output_format='patterns')['patterns']
# collecting spade output
        elements_msip_min_spikes = []
        for out in output_msip_min_spikes:
            elements_msip_min_spikes.append(out['neurons'])
        elements_msip_min_spikes = sorted(elements_msip_min_spikes, key=lambda d: len(d))
        lags_msip_min_spikes = []
        for out in output_msip_min_spikes:
            lags_msip_min_spikes.append(list(out['lags'].magnitude))
        lags_msip_min_spikes = sorted(lags_msip_min_spikes, key=lambda d: len(d))
        # check the lags
        assert_array_equal(lags_msip_min_spikes, [
            l for l in self.lags_msip if len(l) + 1 >= self.min_spikes])
        # check the neurons in the patterns
        assert_array_equal(elements_msip_min_spikes, [
            el for el in self.elements_msip
            if len(el) >= self.min_neu and len(el) >= self.min_spikes])
# test min_occ parameter
output_msip_min_occ = spade.spade(self.msip, self.binsize,
self.winlen,
n_subsets=self.n_subset,
n_surr=self.n_surr, alpha=self.alpha,
min_occ=self.min_occ,
psr_param=self.psr_param,
output_format='patterns')['patterns']
# collect spade output
        occ_msip_min_occ = []
        for out in output_msip_min_occ:
            occ_msip_min_occ.append(list(out['times'].magnitude))
        occ_msip_min_occ = sorted(occ_msip_min_occ, key=lambda d: len(d))
        # test the occurrence times
        assert_array_equal(occ_msip_min_occ, [
            occ for occ in self.occ_msip if len(occ) >= self.min_occ])
    # test comparing the Python and the C implementations of FIM;
    # skipped when the C extension (fim.so) is not available
    @unittest.skipIf(not HAVE_FIM, 'Requires fim.so')
def test_fpgrowth_fca(self):
binary_matrix = conv.BinnedSpikeTrain(
self.patt1, self.binsize).to_bool_array()
context, transactions, rel_matrix = spade._build_context(
binary_matrix, self.winlen)
        # mining the data with the C-based fim module (fpgrowth)
mining_results_fpg = spade._fpgrowth(
transactions,
rel_matrix=rel_matrix)
        # mining the data with the pure-Python fast_fca implementation
mining_results_ffca = spade._fast_fca(context)
# testing that the outputs are identical
assert_array_equal(sorted(mining_results_ffca[0][0]), sorted(
mining_results_fpg[0][0]))
assert_array_equal(sorted(mining_results_ffca[0][1]), sorted(
mining_results_fpg[0][1]))
# test the errors raised
def test_spade_raise_error(self):
self.assertRaises(TypeError, spade.spade, [[1,2,3],[3,4,5]], 1*pq.ms, 4)
self.assertRaises(AttributeError, spade.spade, [neo.SpikeTrain(
[1,2,3]*pq.s, t_stop=5*pq.s), neo.SpikeTrain(
[3,4,5]*pq.s, t_stop=6*pq.s)], 1*pq.ms, 4)
self.assertRaises(AttributeError, spade.spade, [neo.SpikeTrain(
[1, 2, 3] * pq.s, t_stop=5 * pq.s), neo.SpikeTrain(
[3, 4, 5] * pq.s, t_stop=5 * pq.s)], 1 * pq.ms, 4, min_neu=-3)
self.assertRaises(AttributeError, spade.pvalue_spectrum, [
neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=5 * pq.s), neo.SpikeTrain(
[3, 4, 5] * pq.s, t_stop=5 * pq.s)], 1 * pq.ms, 4, 3*pq.ms,
n_surr=-3)
self.assertRaises(AttributeError, spade.test_signature_significance, (
(2, 3, 0.2), (2, 4, 0.1)), 0.01, corr='try')
self.assertRaises(AttributeError, spade.approximate_stability, (),
np.array([]), n_subsets=-3)
def suite():
suite = unittest.makeSuite(SpadeTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
|
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
from oslo_log import log as logging
from zaqar.common import decorators
from zaqar.common.transport.wsgi import helpers as wsgi_helpers
from zaqar.i18n import _
from zaqar.storage import errors as storage_errors
from zaqar.transport import utils
from zaqar.transport import validation
from zaqar.transport.wsgi import errors as wsgi_errors
from zaqar.transport.wsgi import utils as wsgi_utils
LOG = logging.getLogger(__name__)
class CollectionResource(object):
__slots__ = (
'_message_controller',
'_queue_controller',
'_wsgi_conf',
'_validate',
'_message_post_spec',
)
def __init__(self, wsgi_conf, validate,
message_controller, queue_controller,
default_message_ttl):
self._wsgi_conf = wsgi_conf
self._validate = validate
self._message_controller = message_controller
self._queue_controller = queue_controller
self._message_post_spec = (
('ttl', int, default_message_ttl),
('body', '*', None),
)
# ----------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------
def _get_by_id(self, base_path, project_id, queue_name, ids):
"""Returns one or more messages from the queue by ID."""
try:
self._validate.message_listing(limit=len(ids))
messages = self._message_controller.bulk_get(
queue_name,
message_ids=ids,
project=project_id)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(str(ex))
except Exception:
description = _(u'Message could not be retrieved.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
# Prepare response
messages = list(messages)
if not messages:
return None
messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id'])
for m in messages]
return {'messages': messages}
def _get(self, req, project_id, queue_name):
client_uuid = wsgi_helpers.get_client_uuid(req)
kwargs = {}
# NOTE(kgriffs): This syntax ensures that
# we don't clobber default values with None.
req.get_param('marker', store=kwargs)
req.get_param_as_int('limit', store=kwargs)
req.get_param_as_bool('echo', store=kwargs)
req.get_param_as_bool('include_claimed', store=kwargs)
try:
self._validate.message_listing(**kwargs)
results = self._message_controller.list(
queue_name,
project=project_id,
client_uuid=client_uuid,
**kwargs)
# Buffer messages
cursor = next(results)
messages = list(cursor)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(str(ex))
except storage_errors.QueueDoesNotExist as ex:
LOG.debug(ex)
messages = None
except Exception:
description = _(u'Messages could not be listed.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
if not messages:
messages = []
else:
# Found some messages, so prepare the response
kwargs['marker'] = next(results)
base_path = req.path.rsplit('/', 1)[0]
messages = [wsgi_utils.format_message_v1_1(m, base_path,
m['claim_id'])
for m in messages]
links = []
if messages:
links = [
{
'rel': 'next',
'href': req.path + falcon.to_query_str(kwargs)
}
]
return {
'messages': messages,
'links': links
}
# ----------------------------------------------------------------------
# Interface
# ----------------------------------------------------------------------
@decorators.TransportLog("Messages collection")
def on_post(self, req, resp, project_id, queue_name):
client_uuid = wsgi_helpers.get_client_uuid(req)
try:
# Place JSON size restriction before parsing
self._validate.message_length(req.content_length)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(str(ex))
# Deserialize and validate the incoming messages
document = wsgi_utils.deserialize(req.stream, req.content_length)
if 'messages' not in document:
description = _(u'No messages were found in the request body.')
raise wsgi_errors.HTTPBadRequestAPI(description)
messages = wsgi_utils.sanitize(document['messages'],
self._message_post_spec,
doctype=wsgi_utils.JSONArray)
try:
self._validate.message_posting(messages)
if not self._queue_controller.exists(queue_name, project_id):
self._queue_controller.create(queue_name, project=project_id)
message_ids = self._message_controller.post(
queue_name,
messages=messages,
project=project_id,
client_uuid=client_uuid)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(str(ex))
except storage_errors.DoesNotExist as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPNotFound(str(ex))
except storage_errors.MessageConflict:
description = _(u'No messages could be enqueued.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
except Exception:
description = _(u'Messages could not be enqueued.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
# Prepare the response
ids_value = ','.join(message_ids)
resp.location = req.path + '?ids=' + ids_value
hrefs = [req.path + '/' + id for id in message_ids]
body = {'resources': hrefs}
resp.body = utils.to_json(body)
resp.status = falcon.HTTP_201
@decorators.TransportLog("Messages collection")
def on_get(self, req, resp, project_id, queue_name):
ids = req.get_param_as_list('ids')
if ids is None:
response = self._get(req, project_id, queue_name)
else:
response = self._get_by_id(req.path.rsplit('/', 1)[0], project_id,
queue_name, ids)
if response is None:
# NOTE(TheSriram): Trying to get a message by id, should
# return the message if its present, otherwise a 404 since
# the message might have been deleted.
msg = _(u'No messages with IDs: {ids} found in the queue {queue} '
u'for project {project}.')
description = msg.format(queue=queue_name, project=project_id,
ids=ids)
raise wsgi_errors.HTTPNotFound(description)
else:
resp.body = utils.to_json(response)
# status defaults to 200
@decorators.TransportLog("Messages collection")
def on_delete(self, req, resp, project_id, queue_name):
ids = req.get_param_as_list('ids')
pop_limit = req.get_param_as_int('pop')
try:
self._validate.message_deletion(ids, pop_limit)
except validation.ValidationFailed as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPBadRequestAPI(str(ex))
if ids:
resp.status = self._delete_messages_by_id(queue_name, ids,
project_id)
elif pop_limit:
resp.status, resp.body = self._pop_messages(queue_name,
project_id,
pop_limit)
def _delete_messages_by_id(self, queue_name, ids, project_id):
try:
self._message_controller.bulk_delete(
queue_name,
message_ids=ids,
project=project_id)
except Exception:
description = _(u'Messages could not be deleted.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
return falcon.HTTP_204
def _pop_messages(self, queue_name, project_id, pop_limit):
try:
LOG.debug(u'POP messages - queue: %(queue)s, '
u'project: %(project)s',
{'queue': queue_name, 'project': project_id})
messages = self._message_controller.pop(
queue_name,
project=project_id,
limit=pop_limit)
except Exception:
description = _(u'Messages could not be popped.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
# Prepare response
if not messages:
messages = []
body = {'messages': messages}
body = utils.to_json(body)
return falcon.HTTP_200, body
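# Illustrative wire format handled by CollectionResource above (paths and IDs
# are placeholders; the shapes follow _message_post_spec, on_post and _get):
#
#   POST .../queues/demo/messages
#       {"messages": [{"ttl": 300, "body": {"event": "hi"}}]}
#   -> 201 Created, Location: .../messages?ids=<id>
#      {"resources": [".../messages/<id>"]}
#
#   GET .../queues/demo/messages?limit=10
#   -> 200 OK
#      {"messages": [...], "links": [{"rel": "next", "href": "...?marker=..."}]}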
class ItemResource(object):
__slots__ = '_message_controller'
def __init__(self, message_controller):
self._message_controller = message_controller
@decorators.TransportLog("Messages item")
def on_get(self, req, resp, project_id, queue_name, message_id):
try:
message = self._message_controller.get(
queue_name,
message_id,
project=project_id)
except storage_errors.DoesNotExist as ex:
LOG.debug(ex)
raise wsgi_errors.HTTPNotFound(str(ex))
except Exception:
description = _(u'Message could not be retrieved.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
# Prepare response
message['href'] = req.path
message = wsgi_utils.format_message_v1_1(message,
req.path.rsplit('/', 2)[0],
message['claim_id'])
resp.body = utils.to_json(message)
# status defaults to 200
@decorators.TransportLog("Messages item")
def on_delete(self, req, resp, project_id, queue_name, message_id):
error_title = _(u'Unable to delete')
try:
self._message_controller.delete(
queue_name,
message_id=message_id,
project=project_id,
claim=req.get_param('claim_id'))
except storage_errors.MessageNotClaimed as ex:
LOG.debug(ex)
description = _(u'A claim was specified, but the message '
u'is not currently claimed.')
raise falcon.HTTPBadRequest(error_title, description)
except storage_errors.ClaimDoesNotExist as ex:
LOG.debug(ex)
description = _(u'The specified claim does not exist or '
u'has expired.')
raise falcon.HTTPBadRequest(error_title, description)
except storage_errors.NotPermitted as ex:
LOG.debug(ex)
description = _(u'This message is claimed; it cannot be '
u'deleted without a valid claim ID.')
raise falcon.HTTPForbidden(error_title, description)
except Exception:
description = _(u'Message could not be deleted.')
LOG.exception(description)
raise wsgi_errors.HTTPServiceUnavailable(description)
        # All good
resp.status = falcon.HTTP_204
|
|
# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for GCP's spanner instances.
Instances can be created and deleted.
"""
import dataclasses
import json
import logging
from typing import Any, Dict, Optional
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import resource
from perfkitbenchmarker.configs import freeze_restore_spec
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec
from perfkitbenchmarker.providers.gcp import util
import requests
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_spanner_config',
None,
'The config for the Cloud Spanner instance. Use default '
'config if unset.')
flags.DEFINE_integer('cloud_spanner_nodes', None,
'The number of nodes for the Cloud Spanner instance.')
flags.DEFINE_string('cloud_spanner_project',
None,
'The project for the Cloud Spanner instance. Use default '
'project if unset.')
# Valid GCP Spanner types:
DEFAULT_SPANNER_TYPE = 'default'
_DEFAULT_REGION = 'us-central1'
_DEFAULT_DESCRIPTION = 'Spanner instance created by PKB.'
_DEFAULT_DDL = """
CREATE TABLE pkb_table (
id STRING(MAX),
field0 STRING(MAX)
) PRIMARY KEY(id)
"""
_DEFAULT_NODES = 1
_FROZEN_NODE_COUNT = 1
# Common decoder configuration option.
_NONE_OK = {'default': None, 'none_ok': True}
@dataclasses.dataclass
class SpannerSpec(freeze_restore_spec.FreezeRestoreSpec):
"""Configurable options of a Spanner instance."""
# Needed for registering the spec class.
SPEC_TYPE = 'SpannerSpec'
SPEC_ATTRS = ['SERVICE_TYPE']
SERVICE_TYPE = DEFAULT_SPANNER_TYPE
service_type: str
name: str
description: str
database: str
ddl: str
config: str
nodes: int
project: str
def __init__(self,
component_full_name: str,
flag_values: Optional[Dict[str, flags.FlagValues]] = None,
**kwargs):
super().__init__(component_full_name, flag_values=flag_values, **kwargs)
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword arguments
to construct in order to decode the named option.
"""
result = super()._GetOptionDecoderConstructions()
result.update({
'service_type': (
option_decoders.EnumDecoder,
{
'valid_values': [
DEFAULT_SPANNER_TYPE,
],
'default': DEFAULT_SPANNER_TYPE
}),
'name': (option_decoders.StringDecoder, _NONE_OK),
'database': (option_decoders.StringDecoder, _NONE_OK),
'description': (option_decoders.StringDecoder, _NONE_OK),
'ddl': (option_decoders.StringDecoder, _NONE_OK),
'config': (option_decoders.StringDecoder, _NONE_OK),
'nodes': (option_decoders.IntDecoder, _NONE_OK),
'project': (option_decoders.StringDecoder, _NONE_OK),
})
return result
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super()._ApplyFlags(config_values, flag_values)
if flag_values['cloud_spanner_config'].present:
config_values['config'] = flag_values.cloud_spanner_config
if flag_values['cloud_spanner_nodes'].present:
config_values['nodes'] = flag_values.cloud_spanner_nodes
if flag_values['cloud_spanner_project'].present:
config_values['project'] = flag_values.cloud_spanner_project
def GetSpannerSpecClass(service_type) -> Optional[spec.BaseSpecMetaClass]:
"""Return the SpannerSpec class corresponding to 'service_type'."""
return spec.GetSpecClass(SpannerSpec, SERVICE_TYPE=service_type)
class GcpSpannerInstance(resource.BaseResource):
"""Object representing a GCP Spanner Instance.
The project and Cloud Spanner config must already exist. Instance and database
will be created and torn down before and after the test.
The following parameters are overridden by the corresponding FLAGs.
project: FLAGS.cloud_spanner_project
config: FLAGS.cloud_spanner_config
nodes: FLAGS.cloud_spanner_nodes
Attributes:
name: Name of the instance to create.
description: Description of the instance.
database: Name of the database to create
ddl: The schema of the database.
"""
# Required for registering the class.
RESOURCE_TYPE = 'GcpSpannerInstance'
REQUIRED_ATTRS = ['SERVICE_TYPE']
SERVICE_TYPE = DEFAULT_SPANNER_TYPE
def __init__(self,
name: Optional[str] = None,
description: Optional[str] = None,
database: Optional[str] = None,
ddl: Optional[str] = None,
config: Optional[str] = None,
nodes: Optional[int] = None,
project: Optional[str] = None,
**kwargs):
super(GcpSpannerInstance, self).__init__(**kwargs)
self.name = name or f'pkb-instance-{FLAGS.run_uri}'
self.database = database or f'pkb-database-{FLAGS.run_uri}'
self._description = description or _DEFAULT_DESCRIPTION
self._ddl = ddl or _DEFAULT_DDL
self._config = config or self._GetDefaultConfig()
self._nodes = nodes or _DEFAULT_NODES
self._end_point = None
    # Fall back to common flags/defaults when the spec does not set these explicitly.
self.project = (
project or FLAGS.project or util.GetDefaultProject())
self.zone = None
  def _GetDefaultConfig(self) -> str:
    """Gets the config that corresponds to the region used for the test."""
try:
region = util.GetRegionFromZone(
FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0])
except IndexError:
region = _DEFAULT_REGION
return f'regional-{region}'
@classmethod
def FromSpec(cls, spanner_spec: SpannerSpec) -> 'GcpSpannerInstance':
"""Initialize Spanner from the provided spec."""
return cls(
name=spanner_spec.name,
description=spanner_spec.description,
database=spanner_spec.database,
ddl=spanner_spec.ddl,
config=spanner_spec.config,
nodes=spanner_spec.nodes,
project=spanner_spec.project,
enable_freeze_restore=spanner_spec.enable_freeze_restore,
create_on_restore_error=spanner_spec.create_on_restore_error,
delete_on_freeze_error=spanner_spec.delete_on_freeze_error)
  def _Create(self) -> None:
    """Creates the instance and the database, and updates the schema."""
cmd = util.GcloudCommand(self, 'spanner', 'instances', 'create', self.name)
cmd.flags['description'] = self._description
cmd.flags['nodes'] = self._nodes
cmd.flags['config'] = self._config
_, _, retcode = cmd.Issue(raise_on_failure=False)
if retcode != 0:
logging.error('Create GCP Spanner instance failed.')
return
self._UpdateLabels(util.GetDefaultTags())
cmd = util.GcloudCommand(self, 'spanner', 'databases', 'create',
self.database)
cmd.flags['instance'] = self.name
_, _, retcode = cmd.Issue(raise_on_failure=False)
if retcode != 0:
logging.error('Create GCP Spanner database failed.')
return
cmd = util.GcloudCommand(self, 'spanner', 'databases', 'ddl', 'update',
self.database)
cmd.flags['instance'] = self.name
cmd.flags['ddl'] = self._ddl
_, _, retcode = cmd.Issue(raise_on_failure=False)
if retcode != 0:
logging.error('Update GCP Spanner database schema failed.')
else:
logging.info('Created GCP Spanner instance and database.')
def _Delete(self) -> None:
"""Deletes the instance."""
cmd = util.GcloudCommand(self, 'spanner', 'instances', 'delete',
self.name)
_, _, retcode = cmd.Issue(raise_on_failure=False)
if retcode != 0:
logging.error('Delete GCP Spanner instance failed.')
else:
logging.info('Deleted GCP Spanner instance.')
  def _Exists(self, instance_only: bool = False) -> bool:
    """Returns True if both the instance and the database exist."""
cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',
self.name)
# Do not log error or warning when checking existence.
_, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
if retcode != 0:
logging.info('Could not find GCP Spanner instance %s.', self.name)
return False
if instance_only:
return True
cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',
self.database)
cmd.flags['instance'] = self.name
# Do not log error or warning when checking existence.
_, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)
if retcode != 0:
logging.info('Could not find GCP Spanner database %s.', self.database)
return False
return True
  def GetEndPoint(self) -> Optional[str]:
    """Returns the endpoint for Cloud Spanner."""
if self._end_point:
return self._end_point
cmd = util.GcloudCommand(self, 'config', 'get-value',
'api_endpoint_overrides/spanner')
stdout, _, retcode = cmd.Issue(raise_on_failure=False)
if retcode != 0:
      logging.warning('Failed to retrieve the Cloud Spanner endpoint.')
return None
self._end_point = json.loads(stdout)
return self._end_point
def _SetNodes(self, nodes: int) -> None:
"""Sets the number of nodes on the Spanner instance."""
cmd = util.GcloudCommand(self, 'spanner', 'instances', 'update', self.name)
cmd.flags['nodes'] = nodes
cmd.Issue(raise_on_failure=True)
def _Restore(self) -> None:
"""See base class.
Increases the number of nodes on the instance to the specified number. See
https://cloud.google.com/spanner/pricing for Spanner pricing info.
"""
self._SetNodes(self._nodes)
def _Freeze(self) -> None:
"""See base class.
Lowers the number of nodes on the instance to one. Note there are
restrictions to being able to lower the number of nodes on an instance. See
https://cloud.google.com/spanner/docs/create-manage-instances.
"""
self._SetNodes(_FROZEN_NODE_COUNT)
def _GetLabels(self) -> Dict[str, Any]:
"""Gets labels from the current instance."""
cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',
self.name)
stdout, _, _ = cmd.Issue(raise_on_failure=True)
return json.loads(stdout).get('labels', {})
def _UpdateLabels(self, labels: Dict[str, Any]) -> None:
"""Updates the labels of the current instance."""
header = {'Authorization': f'Bearer {util.GetAccessToken()}'}
url = ('https://spanner.googleapis.com/v1/projects/'
f'{self.project}/instances/{self.name}')
# Keep any existing labels
tags = self._GetLabels()
tags.update(labels)
args = {
'instance': {
'labels': tags
},
'fieldMask': 'labels',
}
response = requests.patch(url, headers=header, json=args)
logging.info('Update labels: status code %s, %s',
response.status_code, response.text)
if response.status_code != 200:
raise errors.Resource.UpdateError(
f'Unable to update Spanner instance: {response.text}')
def _UpdateTimeout(self, timeout_minutes: int) -> None:
"""See base class."""
labels = util.GetDefaultTags(timeout_minutes)
self._UpdateLabels(labels)
def GetSpannerClass(
service_type: str) -> Optional[resource.AutoRegisterResourceMeta]:
"""Return the Spanner class associated with service_type."""
return resource.GetResourceClass(
GcpSpannerInstance, SERVICE_TYPE=service_type)
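# Illustrative sketch (not part of the original module): one way a benchmark
# could resolve the registered Spanner class and create an instance via the
# helpers above. The name/database/config values are hypothetical, and
# resource.BaseResource is assumed to expose Create(), which drives the
# _Create()/_Exists() hooks defined above; running this for real requires
# parsed flags and a configured gcloud project.
def _ExampleCreateSpanner():
  spanner_class = GetSpannerClass(DEFAULT_SPANNER_TYPE)
  instance = spanner_class(
      name='pkb-instance-example',
      database='pkb-database-example',
      config='regional-us-central1',
      nodes=_DEFAULT_NODES)
  instance.Create()  # Issues the gcloud create/describe commands defined above.
  return instance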
|
|
import json
from decimal import Decimal
from django.conf import settings
from django.core.urlresolvers import reverse
import commonware.log
from rest_framework import response, serializers
from django.utils.translation import ungettext as ngettext
import mkt
from mkt.api.fields import (ESTranslationSerializerField, LargeTextField,
ReverseChoiceField, SemiSerializerMethodField,
TranslationSerializerField)
from mkt.constants.applications import DEVICE_TYPES
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.constants.iarc_mappings import HUMAN_READABLE_DESCS_AND_INTERACTIVES
from mkt.constants.payments import PROVIDER_BANGO
from mkt.features.utils import load_feature_profile
from mkt.prices.models import AddonPremium, Price
from mkt.search.serializers import BaseESSerializer, es_to_datetime
from mkt.site.helpers import absolutify
from mkt.submit.forms import mark_for_rereview
from mkt.submit.serializers import PreviewSerializer, SimplePreviewSerializer
from mkt.tags.models import attach_tags
from mkt.translations.utils import no_translation
from mkt.versions.models import Version
from mkt.webapps.models import (AddonUpsell, AppFeatures, Geodata, Preview,
Webapp)
from mkt.webapps.utils import dehydrate_content_rating
log = commonware.log.getLogger('z.api')
def http_error(errorclass, reason, extra_data=None):
r = errorclass()
data = {'reason': reason}
if extra_data:
data.update(extra_data)
r.content = json.dumps(data)
return response.Response(r)
class AppFeaturesSerializer(serializers.ModelSerializer):
class Meta:
model = AppFeatures
def to_representation(self, obj):
ret = super(AppFeaturesSerializer, self).to_representation(obj)
ret['required'] = obj.to_list()
return ret
class RegionSerializer(serializers.Serializer):
name = serializers.CharField()
slug = serializers.CharField()
mcc = serializers.CharField()
adolescent = serializers.BooleanField()
class BaseAppSerializer(serializers.ModelSerializer):
# REST Framework 3.x doesn't allow meta.fields to omit fields declared in
# the class body, but it does allow omitting ones in superclasses. All the
# serializers are subsets of the full field collection, hence this
# superclass.
app_type = serializers.ChoiceField(
choices=mkt.ADDON_WEBAPP_TYPES_LOOKUP.items(), read_only=True)
author = serializers.CharField(source='developer_name', read_only=True)
categories = serializers.ListField(
child=serializers.ChoiceField(choices=CATEGORY_CHOICES,
read_only=False),
read_only=False,
required=True)
content_ratings = serializers.SerializerMethodField()
created = serializers.DateTimeField(read_only=True,
format=None)
current_version = serializers.CharField(source='current_version.version',
read_only=True)
default_locale = serializers.CharField(read_only=True)
device_types = SemiSerializerMethodField()
description = TranslationSerializerField(required=False)
homepage = TranslationSerializerField(required=False)
feature_compatibility = serializers.SerializerMethodField()
file_size = serializers.IntegerField(read_only=True)
icons = serializers.SerializerMethodField()
id = serializers.IntegerField(source='pk', required=False)
is_disabled = serializers.BooleanField(read_only=True)
is_homescreen = serializers.SerializerMethodField()
is_offline = serializers.BooleanField(read_only=True)
is_packaged = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True,
format=None)
manifest_url = serializers.CharField(source='get_manifest_url',
read_only=True)
modified = serializers.DateTimeField(read_only=True,
format=None)
name = TranslationSerializerField(required=False)
package_path = serializers.CharField(source='get_package_path',
read_only=True)
payment_account = serializers.SerializerMethodField()
payment_required = serializers.SerializerMethodField()
premium_type = ReverseChoiceField(
choices_dict=mkt.ADDON_PREMIUM_API, required=False)
previews = PreviewSerializer(many=True, required=False,
source='all_previews')
price = SemiSerializerMethodField(source='*', required=False)
price_locale = serializers.SerializerMethodField()
privacy_policy = LargeTextField(view_name='app-privacy-policy-detail',
queryset=Webapp.objects,
required=False)
promo_imgs = serializers.SerializerMethodField()
public_stats = serializers.BooleanField(read_only=True)
ratings = serializers.SerializerMethodField('get_ratings_aggregates')
regions = RegionSerializer(read_only=True, source='get_regions', many=True)
release_notes = TranslationSerializerField(
read_only=True,
source='current_version.releasenotes')
resource_uri = serializers.HyperlinkedIdentityField(view_name='app-detail')
slug = serializers.CharField(source='app_slug', required=False)
status = serializers.IntegerField(read_only=True)
support_email = TranslationSerializerField(required=False)
support_url = TranslationSerializerField(required=False)
supported_locales = serializers.SerializerMethodField()
tags = serializers.SerializerMethodField()
upsell = serializers.SerializerMethodField()
upsold = serializers.HyperlinkedRelatedField(
view_name='app-detail', source='upsold.free',
required=False, queryset=Webapp.objects.all())
user = serializers.SerializerMethodField('get_user_info')
versions = serializers.SerializerMethodField()
class AppSerializer(BaseAppSerializer):
class Meta:
model = Webapp
fields = [
'app_type', 'author', 'categories', 'content_ratings', 'created',
'current_version', 'default_locale', 'description', 'device_types',
'feature_compatibility', 'file_size', 'homepage', 'hosted_url',
'icons', 'id', 'is_disabled', 'is_homescreen', 'is_offline',
'is_packaged', 'last_updated', 'manifest_url', 'name',
'package_path', 'payment_account', 'payment_required',
'premium_type', 'previews', 'price', 'price_locale',
'privacy_policy', 'promo_imgs', 'public_stats', 'release_notes',
'ratings', 'regions', 'resource_uri', 'slug', 'status',
'support_email', 'support_url', 'supported_locales', 'tags',
'upsell', 'upsold', 'user', 'versions'
]
def _get_region_id(self):
request = self.context.get('request')
REGION = getattr(request, 'REGION', None)
return REGION.id if REGION else None
def _get_region_slug(self):
request = self.context.get('request')
REGION = getattr(request, 'REGION', None)
return REGION.slug if REGION else None
def get_content_ratings(self, app):
body = mkt.regions.REGION_TO_RATINGS_BODY().get(
self._get_region_slug(), 'generic')
return {
'body': body,
'rating': app.get_content_ratings_by_body().get(body, None),
'descriptors': (
app.rating_descriptors.to_keys_by_body(body)
if hasattr(app, 'rating_descriptors') else []),
'descriptors_text': (
[HUMAN_READABLE_DESCS_AND_INTERACTIVES[key]
for key in app.rating_descriptors.to_keys_by_body(body)]
if hasattr(app, 'rating_descriptors') else []),
'interactives': (
app.rating_interactives.to_keys()
if hasattr(app, 'rating_interactives') else []),
'interactives_text': (
[HUMAN_READABLE_DESCS_AND_INTERACTIVES[key] for key in
app.rating_interactives.to_keys()]
if hasattr(app, 'rating_interactives') else []),
}
def get_icons(self, app):
return dict([(icon_size, app.get_icon_url(icon_size))
for icon_size in mkt.CONTENT_ICON_SIZES])
def get_feature_compatibility(self, app):
request = self.context['request']
if not hasattr(request, 'feature_profile'):
load_feature_profile(request)
if request.feature_profile is None or app.current_version is None:
            # No profile information was sent, or there is no current version,
            # so we can't compute compatibility; return None.
return None
app_features = app.current_version.features.to_list()
return request.feature_profile.has_features(app_features)
def get_payment_account(self, app):
# Avoid a query for payment_account if the app is not premium.
if not app.is_premium():
return None
try:
            # This is a soon-to-be-deprecated API property that only returns
            # the Bango account, for historical compatibility.
app_acct = app.payment_account(PROVIDER_BANGO)
return reverse('payment-account-detail',
args=[app_acct.payment_account.pk])
except app.PayAccountDoesNotExist:
return None
def get_payment_required(self, app):
if app.has_premium():
tier = app.get_tier()
return bool(tier and tier.price)
return False
def get_price(self, app):
if app.has_premium():
price = app.get_price(region=self._get_region_id())
if price is not None:
return unicode(price)
return None
def get_price_locale(self, app):
if app.has_premium():
return app.get_price_locale(region=self._get_region_id())
return None
def get_promo_imgs(self, obj):
return dict([(promo_img_size, obj.get_promo_img_url(promo_img_size))
for promo_img_size in mkt.PROMO_IMG_SIZES])
def get_ratings_aggregates(self, app):
return {'average': app.average_rating,
'count': app.total_reviews}
def get_supported_locales(self, app):
locs = getattr(app.current_version, 'supported_locales', '')
if locs:
return locs.split(',') if isinstance(locs, basestring) else locs
else:
return []
def get_tags(self, app):
if not hasattr(app, 'tags_list'):
attach_tags([app])
return getattr(app, 'tags_list', [])
def get_upsell(self, app):
upsell = False
if app.upsell:
upsell = app.upsell.premium
# Only return the upsell app if it's public and we are not in an
# excluded region.
if (upsell and upsell.is_public() and self._get_region_id()
not in upsell.get_excluded_region_ids()):
return {
'id': upsell.id,
'app_slug': upsell.app_slug,
'icon_url': upsell.get_icon_url(128),
'name': unicode(upsell.name),
'resource_uri': reverse('app-detail', kwargs={'pk': upsell.pk})
}
else:
return False
def get_user_info(self, app):
request = self.context.get('request')
if request and request.user.is_authenticated():
user = request.user
return {
'developed': app.addonuser_set.filter(
user=user, role=mkt.AUTHOR_ROLE_OWNER).exists(),
'installed': app.has_installed(user),
'purchased': app.pk in user.purchase_ids(),
}
def get_is_homescreen(self, app):
return app.is_homescreen()
def get_versions(self, app):
# Disable transforms, we only need two fields: version and pk.
# Unfortunately, cache-machine gets in the way so we can't use .only()
# (.no_transforms() is ignored, defeating the purpose), and we can't
# use .values() / .values_list() because those aren't cached :(
return dict((v.version, reverse('version-detail', kwargs={'pk': v.pk}))
for v in app.versions.all().no_transforms())
def validate_categories(self, categories):
set_categories = set(categories)
total = len(set_categories)
max_cat = mkt.MAX_CATEGORIES
if total > max_cat:
# L10n: {0} is the number of categories.
raise serializers.ValidationError(ngettext(
'You can have only {0} category.',
'You can have only {0} categories.',
max_cat).format(max_cat))
return categories
def get_device_types(self, device_types):
with no_translation():
return [n.api_name for n in device_types]
def save_device_types(self, obj, new_types):
new_types = [mkt.DEVICE_LOOKUP[d].id for d in new_types]
old_types = [x.id for x in obj.device_types]
added_devices = set(new_types) - set(old_types)
removed_devices = set(old_types) - set(new_types)
for d in added_devices:
obj.addondevicetype_set.create(device_type=d)
for d in removed_devices:
obj.addondevicetype_set.filter(device_type=d).delete()
# Send app to re-review queue if public and new devices are added.
if added_devices and obj.status in mkt.WEBAPPS_APPROVED_STATUSES:
mark_for_rereview(obj, added_devices, removed_devices)
def save_upsold(self, obj, upsold):
current_upsell = obj.upsold
if upsold and upsold != obj.upsold.free:
if not current_upsell:
log.debug('[1@%s] Creating app upsell' % obj.pk)
current_upsell = AddonUpsell(premium=obj)
current_upsell.free = upsold
current_upsell.save()
elif current_upsell:
# We're deleting the upsell.
log.debug('[1@%s] Deleting the app upsell' % obj.pk)
current_upsell.delete()
def save_price(self, obj, price):
# Only valid for premium apps; don't call this on free ones.
valid_prices = Price.objects.exclude(
price='0.00').values_list('price', flat=True)
if not (price and Decimal(price) in valid_prices):
raise serializers.ValidationError(
{'price':
['Premium app specified without a valid price. Price can be'
' one of %s.' % (', '.join('"%s"' % str(p)
for p in valid_prices),)]})
premium = obj.premium
if not premium:
premium = AddonPremium()
premium.addon = obj
premium.price = Price.objects.active().get(price=price)
premium.save()
def validate_device_types(self, device_types):
for v in device_types:
if v not in mkt.DEVICE_LOOKUP.keys():
raise serializers.ValidationError(
str(v) + ' is not one of the available choices.')
return device_types
def validate_price(self, price):
return {'price': price}
def update(self, instance, attrs):
extras = []
# Upsell bits are handled here because we need to remove it
# from the attrs dict before deserializing.
upsold = attrs.pop('upsold.free', None)
if upsold is not None:
extras.append((self.save_upsold, upsold))
price = attrs.pop('price', None)
if attrs.get('premium_type') not in (mkt.ADDON_FREE,
mkt.ADDON_FREE_INAPP):
extras.append((self.save_price, price))
device_types = attrs.pop('device_types', None)
if device_types is not None:
extras.append((self.save_device_types, device_types))
if instance:
instance = super(AppSerializer, self).update(instance, attrs)
else:
instance = super(AppSerializer, self).create(attrs)
for f, v in extras:
f(instance, v)
return instance
def create(self, data):
return self.update(None, data)
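# Illustrative sketch (not part of the original module): update() above pops
# 'upsold.free', 'price' and 'device_types' out of the validated attrs and
# applies them via the save_* helpers after the regular model save. A
# hypothetical PATCH payload for a premium app could therefore look like:
#
#   {'premium_type': 'premium', 'price': '0.99',
#    'device_types': ['firefoxos'], 'categories': ['games']}
#
# (the exact choice values depend on mkt.ADDON_PREMIUM_API, mkt.DEVICE_LOOKUP
# and CATEGORY_CHOICES and are shown here only as an example).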
class ESAppSerializer(BaseESSerializer, AppSerializer):
# Fields specific to search.
absolute_url = serializers.SerializerMethodField()
reviewed = serializers.DateTimeField(format=None,
read_only=True)
# Override previews, because we don't need the full PreviewSerializer.
previews = SimplePreviewSerializer(many=True, source='all_previews')
    # Override these because we want a different source. Also, related fields
    # will call self.queryset early if they are not read_only, so force
    # read_only.
file_size = serializers.SerializerMethodField()
is_disabled = serializers.BooleanField(source='_is_disabled',
read_only=True)
manifest_url = serializers.CharField()
package_path = serializers.SerializerMethodField()
# Feed collection.
group = ESTranslationSerializerField(required=False)
# The fields we want converted to Python date/datetimes.
datetime_fields = ('created', 'last_updated', 'modified', 'reviewed')
class Meta(AppSerializer.Meta):
fields = AppSerializer.Meta.fields + ['absolute_url', 'group',
'reviewed']
def __init__(self, *args, **kwargs):
super(ESAppSerializer, self).__init__(*args, **kwargs)
# Remove fields that we don't have in ES at the moment.
self.fields.pop('upsold', None)
def fake_object(self, data):
"""Create a fake instance of Webapp and related models from ES data."""
is_packaged = data['app_type'] != mkt.ADDON_WEBAPP_HOSTED
is_privileged = data['app_type'] == mkt.ADDON_WEBAPP_PRIVILEGED
obj = Webapp(id=data['id'], app_slug=data['app_slug'],
is_packaged=is_packaged, icon_type='image/png')
# Set relations and attributes we need on those relations.
# The properties set on latest_version and current_version differ
# because we are only setting what the serializer is going to need.
# In particular, latest_version.is_privileged needs to be set because
# it's used by obj.app_type_id.
obj.listed_authors = []
obj._current_version = Version()
obj._current_version.addon = obj
obj._current_version._developer_name = data['author']
obj._current_version.supported_locales = data['supported_locales']
obj._current_version.version = data['current_version']
obj._latest_version = Version()
obj._latest_version.is_privileged = is_privileged
obj._geodata = Geodata()
obj.all_previews = [
Preview(id=p['id'], modified=self.to_datetime(p['modified']),
filetype=p['filetype'], sizes=p.get('sizes', {}))
for p in data['previews']]
obj.categories = data['category']
obj.tags_list = data['tags']
obj._device_types = [DEVICE_TYPES[d] for d in data['device']]
obj._is_disabled = data['is_disabled']
# Set base attributes on the "fake" app using the data from ES.
self._attach_fields(
obj, data, ('created', 'default_locale', 'guid', 'icon_hash',
'is_escalated', 'is_offline', 'last_updated',
'hosted_url', 'manifest_url', 'modified',
'premium_type', 'promo_img_hash', 'regions',
'reviewed', 'status'))
# Attach translations for all translated attributes.
self._attach_translations(
obj, data, ('name', 'description', 'homepage',
'support_email', 'support_url'))
if data.get('group_translations'):
self._attach_translations(obj, data, ('group',)) # Feed group.
else:
obj.group_translations = None
# Release notes target and source name differ (ES stores it as
# release_notes but the db field we are emulating is called
# releasenotes without the "_").
ESTranslationSerializerField.attach_translations(
obj._current_version, data, 'release_notes',
target_name='releasenotes')
# Set attributes that have a different name in ES.
obj.public_stats = data['has_public_stats']
# Override obj.get_excluded_region_ids() to just return the list of
# regions stored in ES instead of making SQL queries.
obj.get_excluded_region_ids = lambda: data['region_exclusions']
# Set up payments stuff to avoid extra queries later (we'll still make
# some, because price info is not in ES).
if obj.is_premium():
Webapp.attach_premiums([obj])
# Some methods below will need the raw data from ES, put it on obj.
obj.es_data = data
return obj
def create(self, data):
return self.fake_object(data)
def get_content_ratings(self, obj):
body = (mkt.regions.REGION_TO_RATINGS_BODY().get(
self._get_region_slug(), 'generic'))
prefix = 'has_%s' % body
        # Convert values stored in the backwards-incompatible (old) index format.
for i, desc in enumerate(obj.es_data.get('content_descriptors', [])):
if desc.isupper():
obj.es_data['content_descriptors'][i] = 'has_' + desc.lower()
for i, inter in enumerate(obj.es_data.get('interactive_elements', [])):
if inter.isupper():
obj.es_data['interactive_elements'][i] = 'has_' + inter.lower()
return {
'body': body,
'rating': dehydrate_content_rating(
(obj.es_data.get('content_ratings') or {})
.get(body)) or None,
'descriptors': [key for key in
obj.es_data.get('content_descriptors', [])
if prefix in key],
'descriptors_text': [HUMAN_READABLE_DESCS_AND_INTERACTIVES[key]
for key
in obj.es_data.get('content_descriptors')
if prefix in key],
'interactives': obj.es_data.get('interactive_elements', []),
'interactives_text': [HUMAN_READABLE_DESCS_AND_INTERACTIVES[key]
for key
in obj.es_data.get('interactive_elements')]
}
def get_feature_compatibility(self, app):
# We're supposed to be filtering out incompatible apps anyway, so don't
# bother calculating feature compatibility: if an app is there, it's
# either compatible or the client overrode this by asking to see apps
# for a different platform.
return None
def get_versions(self, obj):
return dict((v['version'], v['resource_uri'])
for v in obj.es_data['versions'])
def get_ratings_aggregates(self, obj):
return obj.es_data.get('ratings', {})
def get_upsell(self, obj):
upsell = obj.es_data.get('upsell', False)
if upsell:
region_id = self.context['request'].REGION.id
exclusions = upsell.get('region_exclusions')
if exclusions is not None and region_id not in exclusions:
upsell['resource_uri'] = reverse('app-detail',
kwargs={'pk': upsell['id']})
else:
upsell = False
return upsell
def get_absolute_url(self, obj):
return absolutify(obj.get_absolute_url())
def get_package_path(self, obj):
return obj.es_data.get('package_path')
def get_file_size(self, obj):
return obj.es_data.get('file_size')
def get_is_homescreen(self, obj):
return obj.es_data.get('is_homescreen')
class BaseESAppFeedSerializer(ESAppSerializer):
icons = serializers.SerializerMethodField()
def get_icons(self, obj):
"""
Only need the 64px icon for Feed.
"""
return {
'64': obj.get_icon_url(64)
}
class ESAppFeedSerializer(BaseESAppFeedSerializer):
"""
    App serializer targeted towards the Feed, Fireplace's homepage.
Specifically for Feed Apps/Brands that feature the whole app tile and an
install button rather than just an icon.
"""
class Meta(ESAppSerializer.Meta):
fields = [
'author', 'device_types', 'group', 'icons', 'id',
'is_packaged', 'manifest_url', 'name', 'payment_required',
'premium_type', 'price', 'price_locale', 'ratings', 'slug', 'user'
]
class ESAppFeedCollectionSerializer(BaseESAppFeedSerializer):
"""
    App serializer targeted towards the Feed, Fireplace's homepage.
Specifically for Feed Apps, Collections, Shelves that only need app icons.
"""
class Meta(ESAppSerializer.Meta):
fields = [
'device_types', 'icons', 'id', 'slug',
]
class SimpleAppSerializer(AppSerializer):
"""
App serializer with fewer fields (and fewer db queries as a result).
Used as a base for FireplaceAppSerializer and CollectionAppSerializer.
"""
previews = SimplePreviewSerializer(many=True, required=False,
source='all_previews')
class Meta(AppSerializer.Meta):
fields = list(
set(AppSerializer.Meta.fields) - set(
['absolute_url', 'app_type', 'created', 'default_locale',
'package_path', 'payment_account', 'supported_locales',
'upsold', 'tags']))
class SimpleESAppSerializer(ESAppSerializer):
class Meta(SimpleAppSerializer.Meta):
pass
class SuggestionsESAppSerializer(ESAppSerializer):
icon = serializers.SerializerMethodField()
class Meta(ESAppSerializer.Meta):
fields = ['name', 'description', 'absolute_url', 'icon']
def get_icon(self, app):
return app.get_icon_url(64)
class RocketbarESAppSerializer(serializers.Serializer):
"""Used by Firefox OS's Rocketbar apps viewer."""
name = ESTranslationSerializerField()
@property
def data(self):
if getattr(self, '_data', None) is None:
self._data = [self.to_representation(o['payload'])
for o in self.instance]
return self._data
def to_representation(self, obj):
# fake_app is a fake instance because we need to access a couple
# properties and methods on Webapp. It should never hit the database.
self.fake_app = Webapp(
id=obj['id'], icon_type='image/png',
default_locale=obj.get('default_locale', settings.LANGUAGE_CODE),
icon_hash=obj.get('icon_hash'),
modified=es_to_datetime(obj['modified']))
ESTranslationSerializerField.attach_translations(
self.fake_app, obj, 'name')
return {
'name': self.fields['name'].to_representation(
self.fields['name'].get_attribute(self.fake_app)),
'icon': self.fake_app.get_icon_url(64),
'slug': obj['slug'],
'manifest_url': obj['manifest_url'],
}
class RocketbarESAppSerializerV2(AppSerializer, RocketbarESAppSerializer):
"""
Replaced `icon` key with `icons` for various pixel sizes: 128, 64, 48, 32.
"""
def to_representation(self, obj):
data = super(RocketbarESAppSerializerV2, self).to_representation(obj)
del data['icon']
data['icons'] = self.get_icons(self.fake_app)
return data
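# Illustrative sketch (not part of the original module): serializing a Webapp
# for the API. Several SerializerMethodFields above read request.REGION and
# request.user, so the request must be provided through the serializer
# context; 'app' and 'request' below are hypothetical objects.
#
#   serializer = AppSerializer(app, context={'request': request})
#   data = serializer.data  # keys follow AppSerializer.Meta.fields
#
# ESAppSerializer works the same way but builds a fake Webapp from an
# Elasticsearch hit via fake_object() instead of touching the database.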
|
|
"""Utilities for writing code that runs on Python 2 and 3"""
# flake8: noqa
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.3.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X:
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart",
"email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText",
"tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog",
"tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
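# Illustrative sketch (not part of the original module): registering an extra
# renamed module at runtime, assuming this file is importable as ``six``. The
# dbm_gnu example mirrors the gdbm -> dbm.gnu rename and is not in the table
# above.
#
#   add_move(MovedModule("dbm_gnu", "gdbm", "dbm.gnu"))
#   from six.moves import dbm_gnu  # resolved lazily through _LazyDescr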
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
else:
def get_unbound_function(unbound): # NOQA
return unbound.__func__
    class Iterator(object):
        def next(self):
            return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
"""Return an iterator over the (key, [values]) pairs of a dictionary."""
return iter(getattr(d, _iterlists)(**kw))
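def _example_dict_iteration():
    # Illustrative sketch (not part of the original module): version-agnostic
    # dictionary iteration using the helpers above. Returns the same result
    # under Python 2 (iteritems) and Python 3 (items).
    d = {"a": 1, "b": 2}
    return sorted(iteritems(d))  # [('a', 1), ('b', 2)]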
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s): # NOQA
return s
def u(s): # NOQA
        return unicode(s, "unicode_escape")
int2byte = chr
import io
StringIO = BytesIO = io.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
            if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
            if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
            if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
                if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
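def _example_compatibility_helpers():
    # Illustrative sketch (not part of the original module): the helpers above
    # in use. with_metaclass() builds a throwaway base class so a single class
    # statement works with both the Python 2 and Python 3 metaclass syntaxes.
    import abc

    class Base(with_metaclass(abc.ABCMeta, object)):
        """Has ABCMeta as its metaclass on both Python 2 and 3."""

    print_(u("compat helpers loaded"), file=sys.stdout)
    return Base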
|
|
################################################################################
# (c) [2013] The Johns Hopkins University / Applied Physics Laboratory All Rights Reserved.
# Contact the JHU/APL Office of Technology Transfer for any additional rights. www.jhuapl.edu/ott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
""" Solves a membrane detection/classification problem.
This module provides the toplevel interface for solving a binary
"membrane vs non-membrane" classification problem for EM data sets
(e.g. [1]) using convolutional neural networks.
The overall approach is based on Dan Ciresan's paper [2] and the code
is derived from a LeNet example included in the Theano code base for
MNIST classification.
References:
[1] http://brainiac2.mit.edu/isbi_challenge/
[2] Ciresan, Dan, et al. "Deep neural networks segment neuronal membranes
in electron microscopy images." Advances in neural information
processing systems. 2012.
December 2013, mjp
"""
import os, os.path
import sys, time
import socket
import argparse
import numpy
from PIL import Image
import pdb
import theano
import theano.tensor as T
import em_networks as EMN
from em_utils import *
from tiles import *
def load_membrane_data(trainDataFile, trainLabelsFile,
tileSize,
trainSlices, validSlices,
nZeeChannels=0):
"""Loads data set and creates corresponding tile managers.
"""
# load the volume and the labels
if trainDataFile.endswith('.tif'):
X = load_tiff_data(trainDataFile)
# Assumes raw conference data (i.e. not preprocessed).
#for ii in range(X.shape[0]):
# X[ii,:,:] = X[ii,:,:] - numpy.mean(X[ii,:,:])
#X = X / numpy.max(numpy.abs(X))
print '[%s]: Warning: no longer zero-meaning and scaling data' % __name__
elif trainDataFile.endswith('.npz'):
# assumes volume data is stored as the tensor X and is suitably preprocessed
X = numpy.load(trainDataFile)['X']
else:
raise RuntimeError('unexpected data file extension')
Y = load_tiff_data(trainLabelsFile)
# mirror edges
border = numpy.floor(tileSize/2.)
X = mirror_edges_tensor(X, border)
Y = mirror_edges_tensor(Y, border)
# Use 0 and 1 as class labels. This is actually important because
# the neural network code will use class labels as indices into
# the outputs of the last network layer.
#
# 0 := non-membrane
# 1 := membrane
Y[Y==0] = 1; Y[Y==255] = 0
assert(Y.max() == 1)
X_train = X[trainSlices,:,:]
Y_train = Y[trainSlices,:,:]
X_valid = X[validSlices,:,:]
Y_valid = Y[validSlices,:,:]
# tile managers will put the images into GPU memory via Theano shared vars.
train = TileManager(X_train, Y_train, tileSize=tileSize, nZeeChannels=nZeeChannels)
valid = TileManager(X_valid, Y_valid, tileSize=tileSize, nZeeChannels=nZeeChannels)
return (train, valid, (X, Y))
def random_image_modifiers(flipProb=.6, rotProb=.6):
"""Randomly applies certain transforms to a 2d image.
As of this writing, these transforms are some
combination of flips and rotations.
"""
# clip probabilities to [0,1]
flipProb = max(min(flipProb,1),0)
rotProb = max(min(rotProb,1),0)
flipDim = 0; rotDir = 0
if numpy.random.rand() < flipProb:
flipDim = numpy.sign(numpy.random.rand() - .5)
if numpy.random.rand() < rotProb:
rotDir = numpy.sign(numpy.random.rand() - .5)
return flipDim, rotDir
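# Illustrative sketch (not part of the original script): drawing augmentation
# parameters for one batch. Each of flipDim / rotDir is 0 (no transform) or
# +/-1 (direction); the training loop below forwards them to
# TileManager.update_gpu(), which applies the actual flip/rotation.
#
#   flipDim, rotDir = random_image_modifiers(flipProb=.6, rotProb=.6)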
def train_network(nn, trainMgr, validMgr,
nEpochs=30, learningRate=.001, decay=.995,
maxNumTilesPerEpoch=sys.maxint,
outDir="."):
"""Learns parameters for the given neural network.
"""
p2 = int(numpy.floor(nn.p/2.0))
# compute number of minibatches
nTrainBatches = int(numpy.ceil(trainMgr.batchSize / nn.miniBatchSize))
nValidBatches = int(numpy.ceil(validMgr.batchSize / nn.miniBatchSize))
print '[%s]: # of training batches is %d' % (__name__, nTrainBatches)
# allocate symbolic variables
indexT = T.lscalar() # index to a [mini]batch
learningRateT = T.scalar() # learning rate, theano variable
print '[%s]: initializing Theano...' % __name__
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# functions for the validation data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
predict_validation_data = theano.function([indexT], nn.layers[-1].p_y_given_x,
givens={
nn.x: validMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#nn.x: validMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize],
#nn.y: validMgr.y_batch_int[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# functions for the training data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The cost we minimize during training is the NLL of the model
# Assumes the last layer is the logistic regression layer.
cost = nn.layers[-1].negative_log_likelihood(nn.y)
# create a list of all model parameters to be fit by gradient descent
#params = layer3.params + layer2.params + layer1.params + layer0.params
params = reduce(lambda a,b: a+b, [l.params for l in nn.layers])
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters via
# SGD. Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i],grads[i]) pairs.
updates = []
for param_i, grad_i in zip(params, grads):
updates.append((param_i, param_i - learningRateT * grad_i))
train_model = theano.function([indexT, learningRateT], [cost, nn.layers[-1].p_y_given_x], updates=updates,
givens={
nn.x: trainMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize],
nn.y: trainMgr.y_batch_int[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Do the training
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
startTime = time.clock()
trainTime = 0
validTime = 0
lastChatter = -1
nTilesProcessed = 0
nTilesFlipped = 0
nTilesRotated = 0
print '[%s]: Training network.' % __name__
for epoch in xrange(nEpochs):
print '[%s]: Starting epoch %d / %d (net time: %0.2f m)' % (__name__, epoch, nEpochs, (time.clock()-startTime)/60.)
sys.stdout.flush()
prevParams = EMN.save_network_parameters(nn, None) # params just before learning
predictions = numpy.zeros(trainMgr.y_batch_local.shape)
nErrors = 0
for slices,rows,cols,pct in trainMgr.make_balanced_pixel_generator():
# reset predictions
predictions[:] = -1;
            # transform images and update GPU memory
flipDim,rotDir = random_image_modifiers()
trainMgr.update_gpu(slices, rows, cols, flipDim=flipDim, rotDir=rotDir)
if flipDim != 0: nTilesFlipped += len(slices)
if rotDir != 0: nTilesRotated += len(slices)
# process all mini-batches
for minibatchIdx in xrange(nTrainBatches):
tic = time.clock()
[costij, probij] = train_model(minibatchIdx, learningRate)
trainTime += time.clock()-tic
predij = numpy.argmax(probij,axis=1)
predictions[(minibatchIdx*nn.miniBatchSize):(minibatchIdx+1)*nn.miniBatchSize] = predij
nTilesProcessed += len(slices)
nErrors = numpy.sum(predictions != trainMgr.y_batch_local)
# periodically report progress (e.g. every 30 min)
netTime = time.clock()-startTime
if numpy.floor(netTime/1800) > lastChatter:
print '[%s]: epoch %d; processed %0.2e tiles (%0.2f %%); net time %0.2f m' % (__name__, epoch, nTilesProcessed, pct, netTime/60.)
lastChatter = numpy.floor(netTime/1800)
sys.stdout.flush()
# check for early epoch termination
if nTilesProcessed >= maxNumTilesPerEpoch:
print '[%s]: epoch %d: quitting early after %d tiles processed (%0.2f %%)' % (__name__, epoch, nTilesProcessed, pct)
break
#----------------------------------------
# update learning rate after each training epoch
#----------------------------------------
if decay < 1:
learningRate *= decay
#----------------------------------------
# save result (even though it may just be an intermediate result)
#----------------------------------------
fn = 'params_epoch%02d' % epoch
newParams = EMN.save_network_parameters(nn, os.path.join(outDir, fn), verbose=False)
# report how much the network parameters changed
keys = newParams.keys(); keys.sort()
for key in keys:
delta = numpy.ndarray.flatten(numpy.abs(newParams[key] - prevParams[key]))
print '[%s]: %s (%d params)\n %0.2e / %0.2e / %0.2e / %0.2e' % (__name__, key, len(delta), numpy.min(delta), numpy.max(delta), numpy.mean(delta), numpy.median(delta))
#----------------------------------------
# validation performance
#----------------------------------------
print '[%s]: validating performance ...' % __name__
Y_hat = numpy.zeros(validMgr.Y_local.shape)
for slices,rows,cols in validMgr.make_all_pixel_generator():
# update tiles on the GPU
validMgr.update_gpu(slices,rows,cols,flipDim=0,rotDir=0)
for ii in range(nValidBatches):
# predictions is a (nTiles x 2) matrix
# grab the second output (y=1)
# (i.e. we store probability of membrane)
tic = time.clock()
pMembrane = predict_validation_data(ii)[:,1]
validTime += time.clock() - tic
# Be careful - on the last iteration, there may be
# less than batchSize tiles remaining.
a = ii*nn.miniBatchSize
b = min((ii+1)*nn.miniBatchSize, len(slices))
if a > len(slices): break
Y_hat[slices[a:b], rows[a:b], cols[a:b]] = pMembrane[0:b-a]
# Validation statistics are based on a simple threshold
# (without any other postprocessing).
#
# note: throw away the border before evaluating
Y_true = validMgr.Y_local[:,p2:-p2,p2:-p2]
Y_hat = Y_hat[:,p2:-p2,p2:-p2]
eval_performance(Y_true, Y_hat, 0.5, verbose=True)
eval_performance(Y_true, Y_hat, 0.7, verbose=True)
# statistics for this epoch
print '[%s]: epoch %d complete!' % (__name__, epoch)
print '[%s]: learning rate: %0.2e' % (__name__, learningRate)
print '[%s]: # errors: %d' % (__name__, nErrors)
print '[%s]: net elapsed time: %0.2f m' % (__name__, ((time.clock() - startTime) / 60.))
print '[%s]: net gpu train time: %0.2f m' % (__name__, (trainTime/60.))
print '[%s]: net validation time: %0.2f m' % (__name__, (validTime/60.))
print '[%s]: processed tiles: %0.2e' % (__name__, nTilesProcessed)
print '[%s]: flipped tiles: %0.2e' % (__name__, nTilesFlipped)
print '[%s]: rotated tiles: %0.2e' % (__name__, nTilesRotated)
endTime = time.clock()
print('[%s]: Optimization complete.' % __name__)
print '[%s]: The code for file "%s" ran for %0.2fm' % (__name__, os.path.split(__file__)[1], ((endTime - startTime) / 60.))
print "[%s]: GPU train time: %0.2fm" % (__name__, (trainTime/60.0))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
parser = argparse.ArgumentParser('Train a neural network on the EM data set')
#
# Parameters for defining and training the neural network
#
parser.add_argument('-n', dest='network', type=str, default='LeNetMembraneN3',
help='neural network architecture (use a class name here)')
parser.add_argument('-e', dest='nEpochs', type=int, default=30,
help='number of training epochs')
parser.add_argument('-r', dest='learnRate', type=float, default=0.001,
help='starting learning rate')
parser.add_argument('-d', dest='decay', type=float, default=0.995,
help='learning rate decay')
parser.add_argument('-m', dest='maxNumTilesPerEpoch', type=int, default=sys.maxint,
help='Maximum number of tiles used per epoch. Use this if there are too many tiles to process them all each epoch.')
#
# Data set parameters. Assuming here a data cube, where each xy-plane is a "slice" of the cube.
#
parser.add_argument('-X', dest='trainFileName', type=str, default='train-volume-raw.npz',
help='Name of the file containing the membrane data (i.e. X)')
parser.add_argument('-Y', dest='labelsFileName', type=str, default='train-labels.tif',
help='This is the file containing the class labels (i.e. Y)')
parser.add_argument('--train-slices', dest='trainSlicesExpr', type=str, default='range(1,30)',
help='A python-evaluatable string indicating which slices should be used for training')
parser.add_argument('--valid-slices', dest='validSliceExpr', type=str, default='range(27,30)',
help='A python-evaluatable string indicating which slices should be used for validation')
#
# Some special-case flags
#
parser.add_argument('--redirect-stdout', dest='redirectStdout', type=int, default=0,
help='set to 1 to send stdout to log.txt')
parser.add_argument('-c', dest='nZeeChannels', type=int, default=0,
help='number of "mirror" channels')
args = parser.parse_args()
# define and create output directory
host = socket.gethostname()
deviceAndDate = theano.config.device + '_' + time.strftime('%d-%m-%Y')
outDir = os.path.join(host, deviceAndDate, '%s_%03d_%0.4f_%0.4f' % (args.network, args.nEpochs, args.learnRate, args.decay))
if not os.path.isdir(outDir): os.makedirs(outDir)
# Redirect stdout, if asked to do so
if args.redirectStdout:
fn = os.path.join(outDir, 'log.txt')
sys.stdout = open(fn, 'w')
# Set up train/valid slices. Using eval() might not be ideal, but
# provides an easy way for the caller to define train/validation.
trainSlices = eval(args.trainSlicesExpr)
validSlices = eval(args.validSliceExpr)
# create a neural network instance
clazz = getattr(EMN, args.network)
nn = clazz(nChannels=1+2*args.nZeeChannels)
print '[%s]: Using the following parameters:' % __name__
print ' start time: %s' % time.ctime()
print ' host: %s' % host
print ' device: %s' % theano.config.device
print ' pid: %s' % os.getpid()
print ' train data: %s' % args.trainFileName
print ' train labels: %s' % args.labelsFileName
print ' train slices: %s' % trainSlices
print ' valid slices: %s' % validSlices
print ' network: %s' % nn.__class__.__name__
print ' # epochs: %d' % args.nEpochs
print ' max # tiles/epoch: %d' % args.maxNumTilesPerEpoch
print ' learn rate: %0.3f' % args.learnRate
print ' decay: %0.3f' % args.decay
print ' tile size: %d' % nn.p
for idx,l in enumerate(nn.layers):
print ' layer %d: ' % idx,
print str(l.W.get_value().shape)
print ' z-channels: %d' % args.nZeeChannels
print ' output dir: %s' % outDir
print '[%s]: Loading data...' % __name__
(train,valid,membraneData) = load_membrane_data(args.trainFileName, args.labelsFileName,
tileSize=nn.p,
trainSlices=trainSlices,
validSlices=validSlices,
nZeeChannels=args.nZeeChannels)
print ' train dim: %d x %d x %d' % (train.X_local.shape)
print ' valid dim: %d x %d x %d' % (valid.X_local.shape)
print ' valid slices: %s' % (validSlices)
#--------------------------------------------------
# Do the work
#--------------------------------------------------
# train the neural network
train_network(nn, train, valid,
learningRate=args.learnRate,
decay=args.decay,
nEpochs=args.nEpochs,
maxNumTilesPerEpoch=args.maxNumTilesPerEpoch,
outDir=outDir)
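# Illustrative sketch (not part of the original script): a typical invocation,
# assuming this file is saved as train_membranes.py (hypothetical name) and
# the ISBI-style volume/label files referenced by the defaults are present:
#
#   THEANO_FLAGS=device=gpu0 python train_membranes.py \
#       -n LeNetMembraneN3 -e 30 -r 0.001 -d 0.995 \
#       -X train-volume-raw.npz -Y train-labels.tif \
#       --train-slices 'range(0,27)' --valid-slices 'range(27,30)' \
#       --redirect-stdout 1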
|
|
"""
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: Tim Head <betatim@gmail.com>
# Author: Hugo Bowne-Anderson <hugobowne@gmail.com>
# Author: Chris Rivera <chris.richard.rivera@gmail.com>
# Author: Michael Williamson
# Author: James Ashton Nichols <james.ashton.nichols@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta
from .base import BaseEstimator, clone
from .base import RegressorMixin, ClassifierMixin
from .utils import check_array, check_X_y
from .utils.fixes import parallel_helper
from .utils.validation import check_is_fitted, has_fit_parameter
from .utils.metaestimators import if_delegate_has_method
from .externals.joblib import Parallel, delayed
from .externals import six
__all__ = ["MultiOutputRegressor", "MultiOutputClassifier"]
def _fit_estimator(estimator, X, y, sample_weight=None):
estimator = clone(estimator)
if sample_weight is not None:
estimator.fit(X, y, sample_weight=sample_weight)
else:
estimator.fit(X, y)
return estimator
def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None,
first_time=True):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes,
sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
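# Illustrative sketch (not part of the original module): the one-estimator-per-
# output pattern implemented below, exercised through MultiOutputRegressor
# (declared in __all__ above). Assumes scikit-learn and numpy are installed and
# that this module is importable as sklearn.multioutput.
def _example_multioutput_regression():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.multioutput import MultiOutputRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    # Two targets per sample -> two cloned LinearRegression estimators.
    Y = np.hstack([X.sum(axis=1, keepdims=True), 2.0 * X[:, :1]])
    model = MultiOutputRegressor(LinearRegression()).fit(X, Y)
    return model.predict(X)  # shape (20, 2)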
class MultiOutputEstimator(six.with_metaclass(ABCMeta, BaseEstimator)):
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
@if_delegate_has_method('estimator')
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets.
classes : list of numpy arrays, shape (n_outputs)
            Each array holds the unique classes for one output (str or int).
            They can be obtained via
``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
target matrix of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape = (n_samples) or None
Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying estimator supports sample
weights.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y,
multi_output=True,
accept_sparse=True)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
first_time = not hasattr(self, 'estimators_')
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_estimator)(
self.estimators_[i] if not first_time else self.estimator,
X, y[:, i],
classes[i] if classes is not None else None,
sample_weight, first_time) for i in range(y.shape[1]))
return self
def fit(self, X, y, sample_weight=None):
""" Fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets. An indicator matrix turns on multilabel
estimation.
sample_weight : array-like, shape = (n_samples) or None
Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying estimator supports sample
weights.
Returns
-------
self : object
Returns self.
"""
if not hasattr(self.estimator, "fit"):
raise ValueError("The base estimator should implement a fit method")
X, y = check_X_y(X, y,
multi_output=True,
accept_sparse=True)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi-output regression but has only one.")
if (sample_weight is not None and
not has_fit_parameter(self.estimator, 'sample_weight')):
raise ValueError("Underlying estimator does not support"
" sample weights.")
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_estimator)(
self.estimator, X, y[:, i], sample_weight)
for i in range(y.shape[1]))
return self
def predict(self, X):
"""Predict multi-output variable using a model
trained for each target variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
Returns
-------
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets predicted across multiple predictors.
Note: Separate models are generated for each predictor.
"""
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimator, "predict"):
raise ValueError("The base estimator should implement a predict method")
X = check_array(X, accept_sparse=True)
y = Parallel(n_jobs=self.n_jobs)(
delayed(parallel_helper)(e, 'predict', X)
for e in self.estimators_)
return np.asarray(y).T
class MultiOutputRegressor(MultiOutputEstimator, RegressorMixin):
"""Multi target regression
This strategy consists of fitting one regressor per target. This is a
simple strategy for extending regressors that do not natively support
multi-target regression.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and `predict`.
n_jobs : int, optional, default=1
The number of jobs to run in parallel for `fit`. If -1,
then the number of jobs is set to the number of cores.
When individual estimators are fast to train or predict
using `n_jobs>1` can result in slower performance due
to the overhead of spawning processes.
"""
def __init__(self, estimator, n_jobs=1):
super(MultiOutputRegressor, self).__init__(estimator, n_jobs)
def partial_fit(self, X, y, sample_weight=None):
"""Incrementally fit the model to data.
Fit a separate model for each output variable.
Parameters
----------
X : (sparse) array-like, shape (n_samples, n_features)
Data.
y : (sparse) array-like, shape (n_samples, n_outputs)
Multi-output targets.
sample_weight : array-like, shape = (n_samples) or None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying regressor supports sample
weights.
Returns
-------
self : object
Returns self.
"""
        return super(MultiOutputRegressor, self).partial_fit(
            X, y, sample_weight=sample_weight)
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Notes
-----
R^2 is calculated by weighting all the targets equally using
`multioutput='uniform_average'`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Test samples.
y : array-like, shape (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
# XXX remove in 0.19 when r2_score default for multioutput changes
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='uniform_average')
class MultiOutputClassifier(MultiOutputEstimator, ClassifierMixin):
"""Multi target classification
This strategy consists of fitting one classifier per target. This is a
simple strategy for extending classifiers that do not natively support
    multi-target classification.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit`, `score` and `predict_proba`.
n_jobs : int, optional, default=1
        The number of jobs to use for the computation; each target variable
        in y is fitted in parallel. If -1, all CPUs are used. If 1 is given,
        no parallel computing code is used at all, which is useful for
        debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) CPUs are used;
        thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_output` estimators
Estimators used for predictions.
"""
def __init__(self, estimator, n_jobs=1):
super(MultiOutputClassifier, self).__init__(estimator, n_jobs)
def predict_proba(self, X):
"""Probability estimates.
        Returns prediction probabilities for each class of each output.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs \
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimator, "predict_proba"):
raise ValueError("The base estimator should implement"
"predict_proba method")
results = [estimator.predict_proba(X) for estimator in
self.estimators_]
return results
def score(self, X, y):
""""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Test samples
y : array-like, shape [n_samples, n_outputs]
True values for X
Returns
-------
scores : float
accuracy_score of self.predict(X) versus y
"""
check_is_fitted(self, 'estimators_')
n_outputs_ = len(self.estimators_)
if y.ndim == 1:
raise ValueError("y must have at least two dimensions for "
"multi target classification but has only one")
if y.shape[1] != n_outputs_:
raise ValueError("The number of outputs of Y for fit {0} and"
" score {1} should be same".
format(n_outputs_, y.shape[1]))
y_pred = self.predict(X)
return np.mean(np.all(y == y_pred, axis=1))
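

if __name__ == '__main__':
    # Minimal usage sketch (not part of this module): wrap a single-output
    # regressor so that it predicts several targets at once. This assumes
    # scikit-learn is installed and exposes these classes as
    # ``sklearn.multioutput``; the estimator and dataset below are arbitrary
    # choices for illustration.
    from sklearn.datasets import make_regression
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.multioutput import MultiOutputRegressor

    X, Y = make_regression(n_samples=100, n_features=10, n_targets=3,
                           random_state=0)
    model = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    model.fit(X, Y)
    print(model.predict(X[:2]).shape)  # one column per target -> (2, 3)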
|
|
# Third-party imports
import pytest
# Local imports
from uplink import commands, converters, arguments, utils
class TestHttpMethodFactory(object):
def test_call_as_decorator_with_no_args(self):
@commands.HttpMethodFactory(None)
def func():
pass
assert isinstance(func, commands.RequestDefinitionBuilder)
def test_call_as_decorator_with_args(self):
method_factory = commands.HttpMethodFactory(None)
@method_factory(None)
def func():
pass
assert isinstance(func, commands.RequestDefinitionBuilder)
class TestHttpMethod(object):
def test_call(self, mocker, annotation_mock):
# Setup
def func():
pass
sig = utils.Signature(
args=["self", "arg1", "arg2"],
annotations={"arg1": annotation_mock},
return_annotation=None,
)
mocker.patch("uplink.utils.get_arg_spec").return_value = sig
http_method = commands.HttpMethod("METHOD", uri="/{hello}")
builder = http_method(func)
assert isinstance(builder, commands.RequestDefinitionBuilder)
assert builder.method == "METHOD"
assert list(builder.uri.remaining_variables) == ["hello"]
missing_arguments = builder.argument_handler_builder.missing_arguments
expected_missing = set(sig.args[1:]) - set(sig.annotations.keys())
assert set(missing_arguments) == expected_missing
def test_call_with_return_annotation(self, mocker):
# Setup
def func():
pass
sig = utils.Signature(
args=[], annotations={}, return_annotation="return_annotation"
)
mocker.patch("uplink.utils.get_arg_spec").return_value = sig
returns = mocker.patch("uplink.returns.schema")
http_method = commands.HttpMethod("METHOD", uri="/{hello}")
http_method(func)
# Verify: build is wrapped with decorators.returns
returns.assert_called_with(sig.return_annotation)
def test_call_with_args(self, mocker, annotation_mock):
# Setup
def func():
pass
args = mocker.patch("uplink.decorators.args")
# Verify: using sequence
http_method = commands.HttpMethod(
"METHOD", uri="/{hello}", args=(annotation_mock,)
)
http_method(func)
args.assert_called_with(annotation_mock)
# Verify: using mapping
http_method = commands.HttpMethod(
"METHOD", uri="/{hello}", args={"arg1": "value"}
)
http_method(func)
args.assert_called_with(arg1="value")
class TestURIDefinitionBuilder(object):
def test_is_static(self):
assert not commands.URIDefinitionBuilder(None).is_static
def test_is_dynamic_setter(self):
uri = commands.URIDefinitionBuilder(None)
assert not uri.is_dynamic
uri.is_dynamic = True
assert uri.is_dynamic
def test_is_dynamic_setter_fails_when_is_static(self):
uri = commands.URIDefinitionBuilder(True)
assert uri.is_static
with pytest.raises(ValueError):
uri.is_dynamic = True
def test_remaining_variables(self):
uri = commands.URIDefinitionBuilder("/path/with/{variable}")
assert uri.remaining_variables == set(["variable"])
def test_add_variable(self):
uri = commands.URIDefinitionBuilder("/path/with/{variable}")
assert "variable" in uri.remaining_variables
uri.add_variable("variable")
assert "variable" not in uri.remaining_variables
def test_add_variable_raise_error_when_name_is_not_in_static_path(self):
uri = commands.URIDefinitionBuilder("/static/path")
with pytest.raises(ValueError):
uri.add_variable("variable")
def test_build(self):
uri = commands.URIDefinitionBuilder("/static/path")
assert uri.build() == "/static/path"
def test_build_fails_when_variable_remain_in_uri(self):
uri = commands.URIDefinitionBuilder("/path/with/{variable}")
with pytest.raises(commands.MissingUriVariables):
uri.build()
class TestRequestDefinitionBuilder(object):
def test_method_handler_builder_getter(
self, annotation_handler_builder_mock
):
builder = commands.RequestDefinitionBuilder(
None,
None,
None,
type(annotation_handler_builder_mock)(),
annotation_handler_builder_mock,
)
assert builder.method_handler_builder is annotation_handler_builder_mock
def test_build(self, mocker, annotation_handler_builder_mock):
argument_handler_builder = type(annotation_handler_builder_mock)()
method_handler_builder = annotation_handler_builder_mock
uri_definition_builder = mocker.Mock(spec=commands.URIDefinitionBuilder)
builder = commands.RequestDefinitionBuilder(
None,
"method",
uri_definition_builder,
argument_handler_builder,
method_handler_builder,
)
definition = builder.build()
assert isinstance(definition, commands.RequestDefinition)
assert uri_definition_builder.build.called
assert argument_handler_builder.build.called
assert method_handler_builder.build.called
def test_auto_fill_when_not_done(
self, mocker, annotation_handler_builder_mock
):
# Setup
argument_handler_builder = mocker.Mock(
stub=arguments.ArgumentAnnotationHandlerBuilder
)
method_handler_builder = annotation_handler_builder_mock
uri_definition_builder = mocker.Mock(spec=commands.URIDefinitionBuilder)
builder = commands.RequestDefinitionBuilder(
None,
"method",
uri_definition_builder,
argument_handler_builder,
method_handler_builder,
)
# Setup success condition
argument_handler_builder.is_done.return_value = False
argument_handler_builder.missing_arguments = ["arg1"]
uri_definition_builder.remaining_variables = ["arg1"]
# Verify
builder.build()
argument_handler_builder.set_annotations.assert_called_with(
{"arg1": arguments.Path}
)
def test_auto_fill_when_not_done_fails(
self, mocker, annotation_handler_builder_mock
):
# Setup
argument_handler_builder = annotation_handler_builder_mock
method_handler_builder = annotation_handler_builder_mock
uri_definition_builder = mocker.Mock(spec=commands.URIDefinitionBuilder)
builder = commands.RequestDefinitionBuilder(
None,
"method",
uri_definition_builder,
argument_handler_builder,
method_handler_builder,
)
# Setup fail condition: Argument is missing annotation
argument_handler_builder.is_done.return_value = False
argument_handler_builder.missing_arguments = ["arg1"]
uri_definition_builder.remaining_variables = []
# Verify
with pytest.raises(commands.MissingArgumentAnnotations):
builder.build()
class TestRequestDefinition(object):
def test_argument_annotations(self, annotation_handler_mock):
annotation_handler_mock.annotations = ["arg1", "arg2"]
definition = commands.RequestDefinition(
None, None, None, annotation_handler_mock, None
)
assert list(definition.argument_annotations) == ["arg1", "arg2"]
def test_method_annotations(self, annotation_handler_mock):
annotation_handler_mock.annotations = ["arg1", "arg2"]
definition = commands.RequestDefinition(
None, None, None, None, annotation_handler_mock
)
assert list(definition.method_annotations) == ["arg1", "arg2"]
def test_define_request(self, request_builder, mocker):
method = "method"
uri = "uri"
definition = commands.RequestDefinition(
method, uri, str, mocker.Mock(), mocker.Mock()
)
definition.define_request(request_builder, (), {})
assert request_builder.method == method
assert request_builder.relative_url == uri
assert request_builder.return_type is str
def test_make_converter_registry(self, annotation_handler_mock):
definition = commands.RequestDefinition(
"method",
"uri",
None,
annotation_handler_mock,
annotation_handler_mock,
)
annotation_handler_mock.annotations = ("annotation",)
registry = definition.make_converter_registry(())
assert isinstance(registry, converters.ConverterFactoryRegistry)
|
|
# -*- encoding: utf-8 -*-
import ConfigParser
import datetime
from functools import wraps
import json
import os
import re
import requests
import time
from rdopkg import exception
from rdopkg.utils import cmd
from rdopkg.utils import log
COPR_URL = 'https://copr.fedoraproject.org'
COPR_RESULTS_URL = 'http://copr-be.cloud.fedoraproject.org/results'
def fpo_url(pkg, user):
return "http://%s.fedorapeople.org/copr/%s" % (user, pkg)
def upload_fpo(pkg, user):
dst_host = user + '@fedorapeople.org'
dst_path = '~/public_html/copr'
dst = '%s:%s/%s' % (dst_host, dst_path, pkg)
_cmd = ['scp', pkg, dst]
url = fpo_url(pkg, user)
try:
cmd.run(*_cmd)
except exception.CommandFailed as ex:
err = ex.kwargs['out'].stderr
# TODO: fragile, use paramiko instead?
if not re.match('scp: .*No such file or directory', err):
raise
log.info("Creating remote dir: %s:%s" % (dst_host, dst_path))
cmd.run('ssh', dst_host, 'mkdir -p ' + dst_path)
cmd.run(*_cmd)
return url
def rdo_copr_name(release, dist):
return 'rdo-%s-%s' % (release, dist)
def copr_fetcher_id(srpm_url):
_, _, srpm = srpm_url.rpartition('/')
if srpm.endswith('.src.rpm'):
srpm = srpm[:-8]
return srpm
def _get_copr_data(req, user, type=None):
if '<title>Sign in Coprs</title>' in req.text:
raise exception.CoprError(code=403, error='Invalid API token')
if req.status_code == 404:
raise exception.CoprError(code=req.status_code,
error="404 for user %s" % user.get('username'))
try:
output = json.loads(req.text)
except ValueError:
raise exception.CoprError(code=req.status_code,
error="Invalid response (not JSON):\n%s" % req.text)
if req.status_code != 200:
msg = "[%s] %s" % (req.status_code, output['error'])
if (type == 'new_build'
and req.status_code == 500
and output.get('error') == 'Invalid request'):
msg += ("\nThis funny copr response might mean you don't have "
"permission to build in this copr. Or not. Hahaha.")
raise exception.CoprError(code=req.status_code,
copr_msg=output.get('error'),
error=msg)
return output
def get_copr_conf_fn():
return os.path.join(os.path.expanduser('~'), '.config', 'copr')
def get_copr_user():
config = ConfigParser.ConfigParser()
config_fn = get_copr_conf_fn()
if not config.read(config_fn):
raise exception.CoprError(
error="Configuration file %s not found.\n"
"See `man copr-cli` for more information" % config_fn)
try:
username = config.get('copr-cli', 'username', None)
login = config.get('copr-cli', 'login', None)
token = config.get('copr-cli', 'token', None)
    except ConfigParser.Error as err:
raise exception.CoprError(
'Bad configuration file %s: %s' % (config_fn, err))
return {'username': username, 'token': token, 'login': login}
def get_copr_url():
config = ConfigParser.ConfigParser()
config.read(get_copr_conf_fn())
copr_url = COPR_URL
if (config.has_section('copr-cli') and
config.has_option('copr-cli', 'copr_url')):
copr_url = config.get('copr-cli', 'copr_url')
return copr_url
def need_user(f):
@wraps(f)
def wrapper(*args, **kwargs):
obj = args[0]
if not obj.user:
obj.user = get_copr_user()
return f(*args, **kwargs)
return wrapper
class RdoCoprs(object):
def __init__(self, owner='jruzicka', copr_url=None, copr_results_url=None):
self.owner = owner
self.user = {}
self.copr_url = copr_url or get_copr_url()
self.copr_results_url = copr_results_url or COPR_RESULTS_URL
def copr_api_url(self, path):
return "%s/api/%s" % (self.copr_url, path)
def get_builds_url(self, release, dist):
copr = rdo_copr_name(release, dist)
return '{0}/coprs/{1}/{2}/builds'.format(
self.copr_url, self.owner, copr)
def get_repo_url(self, release, dist):
copr = rdo_copr_name(release, dist)
return '{0}/{1}/{2}/{3}-x86_64'.format(
self.copr_results_url, self.owner, copr, dist)
@need_user
def _fetch_build_status(self, build_id):
url = self.copr_api_url('coprs/build_status/%s/' % build_id)
req = requests.get(url, auth=(self.user['login'], self.user['token']))
output = _get_copr_data(req, self.user)
if 'status' in output:
return output['status']
if 'error' in output:
raise exception.CoprError(error=output['error'])
raise exception.CoprError(
error="Build status query returned no results.")
@need_user
def new_build(self, srpm_url, release, dist, watch=False):
copr = rdo_copr_name(release, dist)
url = self.copr_api_url('coprs/%s/%s/new_build/' % (self.owner, copr))
data = {
'pkgs': srpm_url,
}
req = requests.post(url,
auth=(self.user['login'], self.user['token']),
data=data)
output = _get_copr_data(req, self.user, type='new_build')
build_ids = output.get('ids')
if not build_ids:
raise exception.CoprError(
error="copr didn't return id of new build.(?!)")
build_id = build_ids[0]
if watch:
log.info("\nWatching build (may be safely interrupted)...")
prevstatus = None
try:
while True:
try:
status = self._fetch_build_status(build_id)
except exception.CoprError as ex:
log.warn("Failed to get build status: %s" % ex)
break
if prevstatus != status:
now = datetime.datetime.now()
if status in ['pending', 'waiting', 'running']:
cstatus = log.term.bold(status)
elif status == 'succeeded':
cstatus = log.term.good(status)
elif status == 'failed':
cstatus = log.term.error(status)
elif status == 'skipped':
cstatus = ("{t.magenta}{st}{t.normal} (build "
"already done)".format(t=log.term,
st=status))
else:
cstatus = log.term.warn(status)
log.info("[%s] %s" % (now.strftime('%H:%M:%S'),
cstatus))
prevstatus = status
if status in ['succeeded', 'failed', 'canceled', 'skipped']:
break
time.sleep(60)
except KeyboardInterrupt:
pass
except Exception as ex:
log.warn("Error during copr build monitoring: %s" % ex)
return build_id
@need_user
def ensure_cli_setup(self):
# sadly, I found no better way, copr API sux
try:
build_id = self.new_build('X', 'icehouse', 'epel-7')
except exception.CoprError as ex:
code = ex.kwargs.get('code')
if code != 500 or ex.kwargs.get('copr_msg') != 'Invalid request':
raise
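

if __name__ == '__main__':
    # Minimal usage sketch (not part of rdopkg): show the repo URL that a
    # hypothetical RDO release/dist pair maps to. get_repo_url() only formats
    # a string, so this runs without network access or copr credentials.
    coprs = RdoCoprs()
    print(coprs.get_repo_url('icehouse', 'epel-7'))
    # Submitting an actual build requires valid ~/.config/copr credentials:
    # coprs.new_build('http://example.com/foo-1.0-1.src.rpm',
    #                 'icehouse', 'epel-7', watch=True)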
|
|
import unittest
from copy import deepcopy
from pulsar.utils.structures import AttributeDictionary
from .utils import TestFailure, LOGGER
__all__ = ['Plugin',
'TestStream',
'TestRunner',
'TestResult']
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
def def_return_val(c):
return None
class Plugin:
'''Interface for all classes which are part of the :class:`.TestRunner`.
Most classes used by the test application are plugins, for
example the :class:`.TestRunner` itself,
the :class:`.TestResult` and the :class:`.TestPlugin`.
'''
result = None
'''An optional result'''
stream = None
    '''Handle for writing text to the default output.
Set by the :class:`.TestRunner` at runtime.
'''
descriptions = None
def configure(self, cfg):
'''Called once just after construction of a :class:`.TestRunner`
and **before any test class is loaded**.
This is a chance to configure the :class:`.Plugin` or global variables
which may affect the way tests are run.
If it returns something other than ``None`` (for example an abort
message) it will stop the configuration of all subsequent
plugins and quit the test.
:parameter cfg: a :class:`.Config`.
:return: ``None`` unless the tests runner must be stopped.
'''
pass
@property
def name(self):
return self.__class__.__name__.lower()
@property
def count(self):
return self.result.count if self.result else 0
@property
def testsRun(self):
return self.result.testsRun if self.result else 0
def on_start(self):
'''Called by the :class:`.TestSuite` once only at startup.
This callback is invoked once all tests are loaded but before
the test suite starts running them.
'''
pass
def on_end(self):
'''Called by the :class:`.TestSuite` just before it stops.
'''
pass
def loadTestsFromTestCase(self, testcls):
'''Called when loading tests from the ``testcls`` class.
Can be used to modify the number of test functions loaded.'''
pass
def startTestClass(self, testcls):
'''Called just before a ``testcls`` runs its tests.
'''
pass
def stopTestClass(self, testcls):
'''Called just after a ``testcls`` has run its tests.
'''
pass
def startTest(self, test):
'''Called just before a ``test`` function is executed.
This is run just before ``_pre_setup`` method.
'''
pass
def stopTest(self, test):
'''Called just after a ``test`` function has finished.
This is run just after the ``_post_teardown`` method.
'''
pass
def before_test_function_run(self, test, local):
'''Can be used by plugins to manipulate the ``test``
        behaviour in the process domain where the test runs.'''
return test
def after_test_function_run(self, test, local):
'''Executed in the ``test`` process domain, after the ``test`` has
finished.'''
pass
def addSuccess(self, test):
        '''Called when a ``test`` function succeeds
'''
pass
def addFailure(self, test, err):
        '''Called when a ``test`` function has a (test) failure
'''
pass
def addError(self, test, err):
        '''Called when a ``test`` function has an (unexpected) error
'''
pass
def addExpectedFailure(self, test, err):
pass
def addSkip(self, test, reason):
pass
def printErrors(self):
pass
def printSummary(self, timeTaken):
pass
def import_module(self, mod):
return mod
def getDescription(self, test):
doc_first_line = test.shortDescription()
teststr = test.tag
if teststr.endswith(test._testMethodName):
teststr = teststr[:-len(test._testMethodName)-1]
teststr = '%s.%s' % (teststr, test)
if self.descriptions and doc_first_line:
return '\n'.join((teststr, doc_first_line))
else:
return teststr
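

# Illustrative sketch only (not part of pulsar): a tiny plugin built on the
# hooks documented on :class:`Plugin` above. It counts how many tests were
# started and logs the total at summary time. It assumes ``LOGGER`` behaves
# like a standard ``logging.Logger``.
class CountingPlugin(Plugin):
    '''Example plugin that counts started tests.'''
    def __init__(self):
        self.started = 0

    def startTest(self, test):
        self.started += 1

    def printSummary(self, timeTaken):
        LOGGER.info('CountingPlugin saw %d tests start', self.started)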
class TestStream(Plugin): # pragma nocover
'''Handle the writing of test results'''
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, result, descriptions=True):
self._handlers = {}
self.stream = stream
self.result = result
self.descriptions = descriptions
self.showAll = False
self.dots = True
def configure(self, cfg):
verbosity = cfg.verbosity
self.showAll = verbosity > 1
self.dots = verbosity == 1
def handler(self, name):
return self._handlers.get(name, self.stream)
def startTest(self, test):
if self.showAll:
self.head(test, 'Started')
def head(self, test, v):
v = self.getDescription(test) + ' ... %s\n' % v
self.stream.write(v)
self.stream.flush()
def addSuccess(self, test):
if self.showAll:
self.head(test, 'OK')
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
if self.showAll:
self.head(test, 'ERROR')
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
if self.showAll:
self.head(test, "FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
if self.showAll:
self.head(test, "skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
if self.showAll:
self.head(test, "expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
if self.showAll:
self.head(test, "unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.result.errors)
self.printErrorList('FAIL', self.result.failures)
return True
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, test))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
def printSummary(self, timeTaken):
        '''Write the summary of test results.'''
stream = self.stream
result = self.result
self.printErrors()
run = result.testsRun
stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
stream.writeln(" (%s)" % (", ".join(infos),))
else:
stream.write("\n")
return True
class TestResult(Plugin):
'''A :class:`.Plugin` for collecting results/failures for test runs.
Each :class:`.Plugin` can access the :class:`.TestRunner` ``result``
object via the :attr:`~Plugin.result` attribute.
'''
def __init__(self, descriptions=True):
self.descriptions = descriptions
self._testsRun = 0
self._count = 0
self.failures = []
self.errors = []
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
@property
def count(self):
return self._count
@property
def testsRun(self):
return self._testsRun
@property
def result(self):
return self
def startTest(self, test):
'''Increase the test counter
'''
self._testsRun += 1
def addError(self, test, err): # pragma nocover
'''Called when an unexpected error has occurred.
``err`` is a tuple of values as returned by ``sys.exc_info()``
'''
self._add_error(test, err, self.errors)
def addFailure(self, test, err): # pragma nocover
        '''Called when a test failure has occurred.
``err`` is a tuple of values as returned by ``sys.exc_info()``
'''
self._add_error(test, err, self.failures)
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((self.getDescription(test), reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occurred."""
self._add_error(test, err, self.expectedFailures)
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(self.getDescription(test))
def _add_error(self, test, exc, container):
if not isinstance(exc, TestFailure):
exc = TestFailure(exc)
test = self.getDescription(test)
container.append((test, str(exc)))
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def testsafe(name, return_val=None):
if not return_val:
return_val = def_return_val
def _(self, *args):
for p in self.plugins:
try:
c = getattr(p, name)(*args)
if c is not None:
return return_val(c)
except Exception: # pragma nocover
                LOGGER.exception('Unhandled error in %s.%s' % (p, name))
return _
class TestRunner(Plugin):
'''A :class:`.Plugin` for asynchronously running tests.
'''
def __init__(self, plugins, stream, writercls=None, descriptions=True):
self.descriptions = descriptions
self.plugins = []
writercls = writercls or TestStream
result = TestResult(descriptions=self.descriptions)
stream = writercls(stream, result, descriptions=self.descriptions)
for p in plugins:
p = deepcopy(p)
p.descriptions = self.descriptions
p.result = result
p.stream = stream
self.plugins.append(p)
self.plugins.append(result)
self.plugins.append(stream)
self.stream = stream
self.result = result
self.loader = unittest.TestLoader()
configure = testsafe('configure', lambda c: c)
on_start = testsafe('on_start')
on_end = testsafe('on_end')
startTestClass = testsafe('startTestClass')
stopTestClass = testsafe('stopTestClass')
startTest = testsafe('startTest')
stopTest = testsafe('stopTest')
addSuccess = testsafe('addSuccess')
addFailure = testsafe('addFailure')
addExpectedFailure = testsafe('addExpectedFailure')
addError = testsafe('addError')
addSkip = testsafe('addSkip')
printErrors = testsafe('printErrors')
printSummary = testsafe('printSummary')
def loadTestsFromTestCase(self, test_cls):
'''Load all ``test`` functions for the ``test_cls``
'''
c = testsafe('loadTestsFromTestCase', lambda v: v)(self, test_cls)
if c is None:
return self.loader.loadTestsFromTestCase(test_cls)
else:
return c
def import_module(self, mod):
for p in self.plugins:
mod = p.import_module(mod)
if not mod:
return
return mod
def before_test_function_run(self, test):
'''Called just before the test is run'''
test.plugins = plugins = {}
for p in self.plugins:
local = AttributeDictionary()
plugins[p.name] = local
test = p.before_test_function_run(test, local) or test
return test
def after_test_function_run(self, test):
'''Called just after the test has finished,
in the test process domain.'''
for p in self.plugins:
local = test.plugins.get(p.name)
if local is not None:
p.after_test_function_run(test, local)
|
|
# sqlalchemy_dremio/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Support for Dremio via pyodbc.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
^^^^^^^^^^
Examples of pyodbc connection string URLs:
* ``dremio+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
"""
from .base import DremioExecutionContext, DremioDialect
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, util
import decimal
import platform
import re
import six
class _DremioNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_DremioNumeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
    # These routines are needed for older versions of pyodbc;
    # as of pyodbc 2.1.8 this logic is integrated there.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int)-1)))
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
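
# Worked example for the helpers above (illustrative only):
# Decimal('1E-8').adjusted() == -8, so _small_dec_to_string(Decimal('1E-8'))
# yields the plain literal '0.00000001' instead of scientific notation,
# which older pyodbc/mxODBC layers accept where '1E-8' would fail.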
class DremioExecutionContext_pyodbc(DremioExecutionContext):
pass
class DremioDialect_pyodbc(PyODBCConnector, DremioDialect):
execution_ctx_cls = DremioExecutionContext_pyodbc
driver_for_platf = {
'Linux 64bit': '/opt/dremio-odbc/lib64/libdrillodbc_sb64.so',
'Linux 32bit': '/opt/dremio-odbc/lib32/libdrillodbc_sb32.so',
'Windows' : 'Dremio Connector',
'Darwin': 'Dremio Connector'
}
platf = platform.system() + (' ' + platform.architecture()[0] if platform.system() == 'Linux' else '')
drv = driver_for_platf[platf]
pyodbc_driver_name = drv
colspecs = util.update_copy(
DremioDialect.colspecs,
{
sqltypes.Numeric:_DremioNumeric_pyodbc
}
)
def __init__(self, **kw):
kw.setdefault('convert_unicode', True)
super(DremioDialect_pyodbc, self).__init__(**kw)
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
keys = opts
query = url.query
connect_args = {}
for param in ('ansi', 'unicode_results', 'autocommit'):
if param in keys:
connect_args[param.upper()] = util.asbool(keys.pop(param))
if 'odbc_connect' in keys:
connectors = [util.unquote_plus(keys.pop('odbc_connect'))]
else:
def check_quote(token):
if ";" in str(token):
token = "'%s'" % token
return token
keys = dict(
(k.lower(), check_quote(v)) for k, v in keys.items()
)
dsn_connection = 'dsn' in keys or \
('host' in keys and 'database' not in keys)
if dsn_connection:
connectors = ['DSN=%s' % (keys.pop('host', '') or
keys.pop('dsn', ''))]
connectors.extend(
[
'HOST=',
'PORT=',
'Schema='
])
else:
port = ''
if 'port' in keys and 'port' not in query:
port = '%d' % int(keys.pop('port'))
connectors = []
driver = keys.pop('driver', self.pyodbc_driver_name)
if driver is None:
util.warn(
"No driver name specified; "
"this is expected by PyODBC when using "
"DSN-less connections")
else:
connectors.append("DRIVER={%s}" % driver)
connectors.extend(
[
'HOST=%s' % keys.pop('host', ''),
'PORT=%s' % port,
'Schema=%s' % keys.pop('database', '')
])
user = keys.pop("user", None)
if user and 'password' in keys:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % keys.pop('password', ''))
elif user and 'password' not in keys:
pass
else:
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if 'odbc_autotranslate' in keys:
connectors.append("AutoTranslate=%s" %
keys.pop("odbc_autotranslate"))
connectors.append('INTTYPESINRESULTSIFPOSSIBLE=y')
connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
return [[";".join(connectors)], connect_args]
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
error_codes = {
'40004', # Connection lost.
'40009', # Connection lost after internal server error.
'40018', # Connection lost after system running out of memory.
'40020', # Connection lost after system running out of memory.
}
dremio_error_codes = {
'HY000': ( # Generic dremio error code
re.compile(six.u(r'operation timed out'), re.IGNORECASE),
re.compile(six.u(r'connection lost'), re.IGNORECASE),
re.compile(six.u(r'Socket closed by peer'), re.IGNORECASE),
)
}
error_code, error_msg = e.args[:2]
if error_code in dremio_error_codes:
# Check dremio error
for msg_re in dremio_error_codes[error_code]:
if msg_re.search(error_msg):
return True
return False
# Check Pyodbc error
return error_code in error_codes
return super(DremioDialect_pyodbc, self).is_disconnect(e, connection, cursor)
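

if __name__ == '__main__':
    # Minimal connection sketch (not part of the dialect): build an Engine for
    # the DSN-style URL documented in the module docstring. ``mydsn`` is a
    # placeholder DSN name; this assumes the sqlalchemy_dremio package and
    # pyodbc are installed so the ``dremio+pyodbc`` dialect can be loaded.
    # create_engine() is lazy and does not open a connection by itself.
    from sqlalchemy import create_engine
    engine = create_engine('dremio+pyodbc://mydsn')
    print(engine.url)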
|
|
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/profiling/basic_timers.c',
'src/core/profiling/stap_timers.c',
'src/core/support/alloc.c',
'src/core/support/avl.c',
'src/core/support/cmdline.c',
'src/core/support/cpu_iphone.c',
'src/core/support/cpu_linux.c',
'src/core/support/cpu_posix.c',
'src/core/support/cpu_windows.c',
'src/core/support/env_linux.c',
'src/core/support/env_posix.c',
'src/core/support/env_win32.c',
'src/core/support/file.c',
'src/core/support/file_posix.c',
'src/core/support/file_win32.c',
'src/core/support/histogram.c',
'src/core/support/host_port.c',
'src/core/support/log.c',
'src/core/support/log_android.c',
'src/core/support/log_linux.c',
'src/core/support/log_posix.c',
'src/core/support/log_win32.c',
'src/core/support/murmur_hash.c',
'src/core/support/slice.c',
'src/core/support/slice_buffer.c',
'src/core/support/stack_lockfree.c',
'src/core/support/string.c',
'src/core/support/string_posix.c',
'src/core/support/string_win32.c',
'src/core/support/subprocess_posix.c',
'src/core/support/sync.c',
'src/core/support/sync_posix.c',
'src/core/support/sync_win32.c',
'src/core/support/thd.c',
'src/core/support/thd_posix.c',
'src/core/support/thd_win32.c',
'src/core/support/time.c',
'src/core/support/time_posix.c',
'src/core/support/time_precise.c',
'src/core/support/time_win32.c',
'src/core/support/tls_pthread.c',
'src/core/httpcli/httpcli_security_connector.c',
'src/core/security/base64.c',
'src/core/security/client_auth_filter.c',
'src/core/security/credentials.c',
'src/core/security/credentials_metadata.c',
'src/core/security/credentials_posix.c',
'src/core/security/credentials_win32.c',
'src/core/security/google_default_credentials.c',
'src/core/security/handshake.c',
'src/core/security/json_token.c',
'src/core/security/jwt_verifier.c',
'src/core/security/secure_endpoint.c',
'src/core/security/security_connector.c',
'src/core/security/security_context.c',
'src/core/security/server_auth_filter.c',
'src/core/security/server_secure_chttp2.c',
'src/core/surface/init_secure.c',
'src/core/surface/secure_channel_create.c',
'src/core/tsi/fake_transport_security.c',
'src/core/tsi/ssl_transport_security.c',
'src/core/tsi/transport_security.c',
'src/core/census/grpc_context.c',
'src/core/census/grpc_filter.c',
'src/core/channel/channel_args.c',
'src/core/channel/channel_stack.c',
'src/core/channel/client_channel.c',
'src/core/channel/client_uchannel.c',
'src/core/channel/compress_filter.c',
'src/core/channel/connected_channel.c',
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
'src/core/client_config/default_initial_connect_string.c',
'src/core/client_config/initial_connect_string.c',
'src/core/client_config/lb_policies/pick_first.c',
'src/core/client_config/lb_policies/round_robin.c',
'src/core/client_config/lb_policy.c',
'src/core/client_config/lb_policy_factory.c',
'src/core/client_config/lb_policy_registry.c',
'src/core/client_config/resolver.c',
'src/core/client_config/resolver_factory.c',
'src/core/client_config/resolver_registry.c',
'src/core/client_config/resolvers/dns_resolver.c',
'src/core/client_config/resolvers/sockaddr_resolver.c',
'src/core/client_config/subchannel.c',
'src/core/client_config/subchannel_factory.c',
'src/core/client_config/uri_parser.c',
'src/core/compression/algorithm.c',
'src/core/compression/message_compress.c',
'src/core/debug/trace.c',
'src/core/httpcli/format_request.c',
'src/core/httpcli/httpcli.c',
'src/core/httpcli/parser.c',
'src/core/iomgr/closure.c',
'src/core/iomgr/endpoint.c',
'src/core/iomgr/endpoint_pair_posix.c',
'src/core/iomgr/endpoint_pair_windows.c',
'src/core/iomgr/exec_ctx.c',
'src/core/iomgr/executor.c',
'src/core/iomgr/fd_posix.c',
'src/core/iomgr/iocp_windows.c',
'src/core/iomgr/iomgr.c',
'src/core/iomgr/iomgr_posix.c',
'src/core/iomgr/iomgr_windows.c',
'src/core/iomgr/pollset_multipoller_with_epoll.c',
'src/core/iomgr/pollset_multipoller_with_poll_posix.c',
'src/core/iomgr/pollset_posix.c',
'src/core/iomgr/pollset_set_posix.c',
'src/core/iomgr/pollset_set_windows.c',
'src/core/iomgr/pollset_windows.c',
'src/core/iomgr/resolve_address_posix.c',
'src/core/iomgr/resolve_address_windows.c',
'src/core/iomgr/sockaddr_utils.c',
'src/core/iomgr/socket_utils_common_posix.c',
'src/core/iomgr/socket_utils_linux.c',
'src/core/iomgr/socket_utils_posix.c',
'src/core/iomgr/socket_windows.c',
'src/core/iomgr/tcp_client_posix.c',
'src/core/iomgr/tcp_client_windows.c',
'src/core/iomgr/tcp_posix.c',
'src/core/iomgr/tcp_server_posix.c',
'src/core/iomgr/tcp_server_windows.c',
'src/core/iomgr/tcp_windows.c',
'src/core/iomgr/time_averaged_stats.c',
'src/core/iomgr/timer.c',
'src/core/iomgr/timer_heap.c',
'src/core/iomgr/udp_server.c',
'src/core/iomgr/wakeup_fd_eventfd.c',
'src/core/iomgr/wakeup_fd_nospecial.c',
'src/core/iomgr/wakeup_fd_pipe.c',
'src/core/iomgr/wakeup_fd_posix.c',
'src/core/iomgr/workqueue_posix.c',
'src/core/iomgr/workqueue_windows.c',
'src/core/json/json.c',
'src/core/json/json_reader.c',
'src/core/json/json_string.c',
'src/core/json/json_writer.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_reader.c',
'src/core/surface/call.c',
'src/core/surface/call_details.c',
'src/core/surface/call_log_batch.c',
'src/core/surface/channel.c',
'src/core/surface/channel_connectivity.c',
'src/core/surface/channel_create.c',
'src/core/surface/channel_ping.c',
'src/core/surface/completion_queue.c',
'src/core/surface/event_string.c',
'src/core/surface/init.c',
'src/core/surface/lame_client.c',
'src/core/surface/metadata_array.c',
'src/core/surface/server.c',
'src/core/surface/server_chttp2.c',
'src/core/surface/server_create.c',
'src/core/surface/validate_metadata.c',
'src/core/surface/version.c',
'src/core/transport/byte_stream.c',
'src/core/transport/chttp2/alpn.c',
'src/core/transport/chttp2/bin_encoder.c',
'src/core/transport/chttp2/frame_data.c',
'src/core/transport/chttp2/frame_goaway.c',
'src/core/transport/chttp2/frame_ping.c',
'src/core/transport/chttp2/frame_rst_stream.c',
'src/core/transport/chttp2/frame_settings.c',
'src/core/transport/chttp2/frame_window_update.c',
'src/core/transport/chttp2/hpack_encoder.c',
'src/core/transport/chttp2/hpack_parser.c',
'src/core/transport/chttp2/hpack_table.c',
'src/core/transport/chttp2/huffsyms.c',
'src/core/transport/chttp2/incoming_metadata.c',
'src/core/transport/chttp2/parsing.c',
'src/core/transport/chttp2/status_conversion.c',
'src/core/transport/chttp2/stream_lists.c',
'src/core/transport/chttp2/stream_map.c',
'src/core/transport/chttp2/timeout_encoding.c',
'src/core/transport/chttp2/varint.c',
'src/core/transport/chttp2/writing.c',
'src/core/transport/chttp2_transport.c',
'src/core/transport/connectivity_state.c',
'src/core/transport/metadata.c',
'src/core/transport/metadata_batch.c',
'src/core/transport/static_metadata.c',
'src/core/transport/transport.c',
'src/core/transport/transport_op_string.c',
'src/core/census/context.c',
'src/core/census/initialize.c',
'src/core/census/operation.c',
'src/core/census/tag_set.c',
'src/core/census/tracing.c',
'src/boringssl/err_data.c',
'third_party/boringssl/crypto/aes/aes.c',
'third_party/boringssl/crypto/aes/mode_wrappers.c',
'third_party/boringssl/crypto/asn1/a_bitstr.c',
'third_party/boringssl/crypto/asn1/a_bool.c',
'third_party/boringssl/crypto/asn1/a_bytes.c',
'third_party/boringssl/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl/crypto/asn1/a_dup.c',
'third_party/boringssl/crypto/asn1/a_enum.c',
'third_party/boringssl/crypto/asn1/a_gentm.c',
'third_party/boringssl/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl/crypto/asn1/a_int.c',
'third_party/boringssl/crypto/asn1/a_mbstr.c',
'third_party/boringssl/crypto/asn1/a_object.c',
'third_party/boringssl/crypto/asn1/a_octet.c',
'third_party/boringssl/crypto/asn1/a_print.c',
'third_party/boringssl/crypto/asn1/a_strnid.c',
'third_party/boringssl/crypto/asn1/a_time.c',
'third_party/boringssl/crypto/asn1/a_type.c',
'third_party/boringssl/crypto/asn1/a_utctm.c',
'third_party/boringssl/crypto/asn1/a_utf8.c',
'third_party/boringssl/crypto/asn1/asn1_lib.c',
'third_party/boringssl/crypto/asn1/asn1_par.c',
'third_party/boringssl/crypto/asn1/asn_pack.c',
'third_party/boringssl/crypto/asn1/bio_asn1.c',
'third_party/boringssl/crypto/asn1/bio_ndef.c',
'third_party/boringssl/crypto/asn1/f_enum.c',
'third_party/boringssl/crypto/asn1/f_int.c',
'third_party/boringssl/crypto/asn1/f_string.c',
'third_party/boringssl/crypto/asn1/t_bitst.c',
'third_party/boringssl/crypto/asn1/t_pkey.c',
'third_party/boringssl/crypto/asn1/tasn_dec.c',
'third_party/boringssl/crypto/asn1/tasn_enc.c',
'third_party/boringssl/crypto/asn1/tasn_fre.c',
'third_party/boringssl/crypto/asn1/tasn_new.c',
'third_party/boringssl/crypto/asn1/tasn_prn.c',
'third_party/boringssl/crypto/asn1/tasn_typ.c',
'third_party/boringssl/crypto/asn1/tasn_utl.c',
'third_party/boringssl/crypto/asn1/x_bignum.c',
'third_party/boringssl/crypto/asn1/x_long.c',
'third_party/boringssl/crypto/base64/base64.c',
'third_party/boringssl/crypto/bio/bio.c',
'third_party/boringssl/crypto/bio/bio_mem.c',
'third_party/boringssl/crypto/bio/buffer.c',
'third_party/boringssl/crypto/bio/connect.c',
'third_party/boringssl/crypto/bio/fd.c',
'third_party/boringssl/crypto/bio/file.c',
'third_party/boringssl/crypto/bio/hexdump.c',
'third_party/boringssl/crypto/bio/pair.c',
'third_party/boringssl/crypto/bio/printf.c',
'third_party/boringssl/crypto/bio/socket.c',
'third_party/boringssl/crypto/bio/socket_helper.c',
'third_party/boringssl/crypto/bn/add.c',
'third_party/boringssl/crypto/bn/asm/x86_64-gcc.c',
'third_party/boringssl/crypto/bn/bn.c',
'third_party/boringssl/crypto/bn/bn_asn1.c',
'third_party/boringssl/crypto/bn/cmp.c',
'third_party/boringssl/crypto/bn/convert.c',
'third_party/boringssl/crypto/bn/ctx.c',
'third_party/boringssl/crypto/bn/div.c',
'third_party/boringssl/crypto/bn/exponentiation.c',
'third_party/boringssl/crypto/bn/gcd.c',
'third_party/boringssl/crypto/bn/generic.c',
'third_party/boringssl/crypto/bn/kronecker.c',
'third_party/boringssl/crypto/bn/montgomery.c',
'third_party/boringssl/crypto/bn/mul.c',
'third_party/boringssl/crypto/bn/prime.c',
'third_party/boringssl/crypto/bn/random.c',
'third_party/boringssl/crypto/bn/rsaz_exp.c',
'third_party/boringssl/crypto/bn/shift.c',
'third_party/boringssl/crypto/bn/sqrt.c',
'third_party/boringssl/crypto/buf/buf.c',
'third_party/boringssl/crypto/bytestring/ber.c',
'third_party/boringssl/crypto/bytestring/cbb.c',
'third_party/boringssl/crypto/bytestring/cbs.c',
'third_party/boringssl/crypto/chacha/chacha_generic.c',
'third_party/boringssl/crypto/chacha/chacha_vec.c',
'third_party/boringssl/crypto/cipher/aead.c',
'third_party/boringssl/crypto/cipher/cipher.c',
'third_party/boringssl/crypto/cipher/derive_key.c',
'third_party/boringssl/crypto/cipher/e_aes.c',
'third_party/boringssl/crypto/cipher/e_chacha20poly1305.c',
'third_party/boringssl/crypto/cipher/e_des.c',
'third_party/boringssl/crypto/cipher/e_null.c',
'third_party/boringssl/crypto/cipher/e_rc2.c',
'third_party/boringssl/crypto/cipher/e_rc4.c',
'third_party/boringssl/crypto/cipher/e_ssl3.c',
'third_party/boringssl/crypto/cipher/e_tls.c',
'third_party/boringssl/crypto/cipher/tls_cbc.c',
'third_party/boringssl/crypto/cmac/cmac.c',
'third_party/boringssl/crypto/conf/conf.c',
'third_party/boringssl/crypto/cpu-arm.c',
'third_party/boringssl/crypto/cpu-intel.c',
'third_party/boringssl/crypto/crypto.c',
'third_party/boringssl/crypto/curve25519/curve25519.c',
'third_party/boringssl/crypto/des/des.c',
'third_party/boringssl/crypto/dh/check.c',
'third_party/boringssl/crypto/dh/dh.c',
'third_party/boringssl/crypto/dh/dh_asn1.c',
'third_party/boringssl/crypto/dh/params.c',
'third_party/boringssl/crypto/digest/digest.c',
'third_party/boringssl/crypto/digest/digests.c',
'third_party/boringssl/crypto/directory_posix.c',
'third_party/boringssl/crypto/directory_win.c',
'third_party/boringssl/crypto/dsa/dsa.c',
'third_party/boringssl/crypto/dsa/dsa_asn1.c',
'third_party/boringssl/crypto/ec/ec.c',
'third_party/boringssl/crypto/ec/ec_asn1.c',
'third_party/boringssl/crypto/ec/ec_key.c',
'third_party/boringssl/crypto/ec/ec_montgomery.c',
'third_party/boringssl/crypto/ec/oct.c',
'third_party/boringssl/crypto/ec/p224-64.c',
'third_party/boringssl/crypto/ec/p256-64.c',
'third_party/boringssl/crypto/ec/p256-x86_64.c',
'third_party/boringssl/crypto/ec/simple.c',
'third_party/boringssl/crypto/ec/util-64.c',
'third_party/boringssl/crypto/ec/wnaf.c',
'third_party/boringssl/crypto/ecdh/ecdh.c',
'third_party/boringssl/crypto/ecdsa/ecdsa.c',
'third_party/boringssl/crypto/ecdsa/ecdsa_asn1.c',
'third_party/boringssl/crypto/engine/engine.c',
'third_party/boringssl/crypto/err/err.c',
'third_party/boringssl/crypto/evp/algorithm.c',
'third_party/boringssl/crypto/evp/digestsign.c',
'third_party/boringssl/crypto/evp/evp.c',
'third_party/boringssl/crypto/evp/evp_asn1.c',
'third_party/boringssl/crypto/evp/evp_ctx.c',
'third_party/boringssl/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl/crypto/evp/p_ec.c',
'third_party/boringssl/crypto/evp/p_ec_asn1.c',
'third_party/boringssl/crypto/evp/p_rsa.c',
'third_party/boringssl/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl/crypto/evp/pbkdf.c',
'third_party/boringssl/crypto/evp/sign.c',
'third_party/boringssl/crypto/ex_data.c',
'third_party/boringssl/crypto/hkdf/hkdf.c',
'third_party/boringssl/crypto/hmac/hmac.c',
'third_party/boringssl/crypto/lhash/lhash.c',
'third_party/boringssl/crypto/md4/md4.c',
'third_party/boringssl/crypto/md5/md5.c',
'third_party/boringssl/crypto/mem.c',
'third_party/boringssl/crypto/modes/cbc.c',
'third_party/boringssl/crypto/modes/cfb.c',
'third_party/boringssl/crypto/modes/ctr.c',
'third_party/boringssl/crypto/modes/gcm.c',
'third_party/boringssl/crypto/modes/ofb.c',
'third_party/boringssl/crypto/obj/obj.c',
'third_party/boringssl/crypto/obj/obj_xref.c',
'third_party/boringssl/crypto/pem/pem_all.c',
'third_party/boringssl/crypto/pem/pem_info.c',
'third_party/boringssl/crypto/pem/pem_lib.c',
'third_party/boringssl/crypto/pem/pem_oth.c',
'third_party/boringssl/crypto/pem/pem_pk8.c',
'third_party/boringssl/crypto/pem/pem_pkey.c',
'third_party/boringssl/crypto/pem/pem_x509.c',
'third_party/boringssl/crypto/pem/pem_xaux.c',
'third_party/boringssl/crypto/pkcs8/p5_pbe.c',
'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
'third_party/boringssl/crypto/pkcs8/pkcs8.c',
'third_party/boringssl/crypto/poly1305/poly1305.c',
'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl/crypto/rand/rand.c',
'third_party/boringssl/crypto/rand/urandom.c',
'third_party/boringssl/crypto/rand/windows.c',
'third_party/boringssl/crypto/rc4/rc4.c',
'third_party/boringssl/crypto/refcount_c11.c',
'third_party/boringssl/crypto/refcount_lock.c',
'third_party/boringssl/crypto/rsa/blinding.c',
'third_party/boringssl/crypto/rsa/padding.c',
'third_party/boringssl/crypto/rsa/rsa.c',
'third_party/boringssl/crypto/rsa/rsa_asn1.c',
'third_party/boringssl/crypto/rsa/rsa_impl.c',
'third_party/boringssl/crypto/sha/sha1.c',
'third_party/boringssl/crypto/sha/sha256.c',
'third_party/boringssl/crypto/sha/sha512.c',
'third_party/boringssl/crypto/stack/stack.c',
'third_party/boringssl/crypto/thread.c',
'third_party/boringssl/crypto/thread_none.c',
'third_party/boringssl/crypto/thread_pthread.c',
'third_party/boringssl/crypto/thread_win.c',
'third_party/boringssl/crypto/time_support.c',
'third_party/boringssl/crypto/x509/a_digest.c',
'third_party/boringssl/crypto/x509/a_sign.c',
'third_party/boringssl/crypto/x509/a_strex.c',
'third_party/boringssl/crypto/x509/a_verify.c',
'third_party/boringssl/crypto/x509/asn1_gen.c',
'third_party/boringssl/crypto/x509/by_dir.c',
'third_party/boringssl/crypto/x509/by_file.c',
'third_party/boringssl/crypto/x509/i2d_pr.c',
'third_party/boringssl/crypto/x509/pkcs7.c',
'third_party/boringssl/crypto/x509/t_crl.c',
'third_party/boringssl/crypto/x509/t_req.c',
'third_party/boringssl/crypto/x509/t_x509.c',
'third_party/boringssl/crypto/x509/t_x509a.c',
'third_party/boringssl/crypto/x509/x509.c',
'third_party/boringssl/crypto/x509/x509_att.c',
'third_party/boringssl/crypto/x509/x509_cmp.c',
'third_party/boringssl/crypto/x509/x509_d2.c',
'third_party/boringssl/crypto/x509/x509_def.c',
'third_party/boringssl/crypto/x509/x509_ext.c',
'third_party/boringssl/crypto/x509/x509_lu.c',
'third_party/boringssl/crypto/x509/x509_obj.c',
'third_party/boringssl/crypto/x509/x509_r2x.c',
'third_party/boringssl/crypto/x509/x509_req.c',
'third_party/boringssl/crypto/x509/x509_set.c',
'third_party/boringssl/crypto/x509/x509_trs.c',
'third_party/boringssl/crypto/x509/x509_txt.c',
'third_party/boringssl/crypto/x509/x509_v3.c',
'third_party/boringssl/crypto/x509/x509_vfy.c',
'third_party/boringssl/crypto/x509/x509_vpm.c',
'third_party/boringssl/crypto/x509/x509cset.c',
'third_party/boringssl/crypto/x509/x509name.c',
'third_party/boringssl/crypto/x509/x509rset.c',
'third_party/boringssl/crypto/x509/x509spki.c',
'third_party/boringssl/crypto/x509/x509type.c',
'third_party/boringssl/crypto/x509/x_algor.c',
'third_party/boringssl/crypto/x509/x_all.c',
'third_party/boringssl/crypto/x509/x_attrib.c',
'third_party/boringssl/crypto/x509/x_crl.c',
'third_party/boringssl/crypto/x509/x_exten.c',
'third_party/boringssl/crypto/x509/x_info.c',
'third_party/boringssl/crypto/x509/x_name.c',
'third_party/boringssl/crypto/x509/x_pkey.c',
'third_party/boringssl/crypto/x509/x_pubkey.c',
'third_party/boringssl/crypto/x509/x_req.c',
'third_party/boringssl/crypto/x509/x_sig.c',
'third_party/boringssl/crypto/x509/x_spki.c',
'third_party/boringssl/crypto/x509/x_val.c',
'third_party/boringssl/crypto/x509/x_x509.c',
'third_party/boringssl/crypto/x509/x_x509a.c',
'third_party/boringssl/crypto/x509v3/pcy_cache.c',
'third_party/boringssl/crypto/x509v3/pcy_data.c',
'third_party/boringssl/crypto/x509v3/pcy_lib.c',
'third_party/boringssl/crypto/x509v3/pcy_map.c',
'third_party/boringssl/crypto/x509v3/pcy_node.c',
'third_party/boringssl/crypto/x509v3/pcy_tree.c',
'third_party/boringssl/crypto/x509v3/v3_akey.c',
'third_party/boringssl/crypto/x509v3/v3_akeya.c',
'third_party/boringssl/crypto/x509v3/v3_alt.c',
'third_party/boringssl/crypto/x509v3/v3_bcons.c',
'third_party/boringssl/crypto/x509v3/v3_bitst.c',
'third_party/boringssl/crypto/x509v3/v3_conf.c',
'third_party/boringssl/crypto/x509v3/v3_cpols.c',
'third_party/boringssl/crypto/x509v3/v3_crld.c',
'third_party/boringssl/crypto/x509v3/v3_enum.c',
'third_party/boringssl/crypto/x509v3/v3_extku.c',
'third_party/boringssl/crypto/x509v3/v3_genn.c',
'third_party/boringssl/crypto/x509v3/v3_ia5.c',
'third_party/boringssl/crypto/x509v3/v3_info.c',
'third_party/boringssl/crypto/x509v3/v3_int.c',
'third_party/boringssl/crypto/x509v3/v3_lib.c',
'third_party/boringssl/crypto/x509v3/v3_ncons.c',
'third_party/boringssl/crypto/x509v3/v3_pci.c',
'third_party/boringssl/crypto/x509v3/v3_pcia.c',
'third_party/boringssl/crypto/x509v3/v3_pcons.c',
'third_party/boringssl/crypto/x509v3/v3_pku.c',
'third_party/boringssl/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl/crypto/x509v3/v3_prn.c',
'third_party/boringssl/crypto/x509v3/v3_purp.c',
'third_party/boringssl/crypto/x509v3/v3_skey.c',
'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl/crypto/x509v3/v3_utl.c',
'third_party/boringssl/ssl/custom_extensions.c',
'third_party/boringssl/ssl/d1_both.c',
'third_party/boringssl/ssl/d1_clnt.c',
'third_party/boringssl/ssl/d1_lib.c',
'third_party/boringssl/ssl/d1_meth.c',
'third_party/boringssl/ssl/d1_pkt.c',
'third_party/boringssl/ssl/d1_srtp.c',
'third_party/boringssl/ssl/d1_srvr.c',
'third_party/boringssl/ssl/dtls_record.c',
'third_party/boringssl/ssl/pqueue/pqueue.c',
'third_party/boringssl/ssl/s3_both.c',
'third_party/boringssl/ssl/s3_clnt.c',
'third_party/boringssl/ssl/s3_enc.c',
'third_party/boringssl/ssl/s3_lib.c',
'third_party/boringssl/ssl/s3_meth.c',
'third_party/boringssl/ssl/s3_pkt.c',
'third_party/boringssl/ssl/s3_srvr.c',
'third_party/boringssl/ssl/ssl_aead_ctx.c',
'third_party/boringssl/ssl/ssl_asn1.c',
'third_party/boringssl/ssl/ssl_buffer.c',
'third_party/boringssl/ssl/ssl_cert.c',
'third_party/boringssl/ssl/ssl_cipher.c',
'third_party/boringssl/ssl/ssl_file.c',
'third_party/boringssl/ssl/ssl_lib.c',
'third_party/boringssl/ssl/ssl_rsa.c',
'third_party/boringssl/ssl/ssl_session.c',
'third_party/boringssl/ssl/ssl_stat.c',
'third_party/boringssl/ssl/t1_enc.c',
'third_party/boringssl/ssl/t1_lib.c',
'third_party/boringssl/ssl/tls_record.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Island snapshot code.
"""
import os
import string
import random
import md5
from cinder import context
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import test
from cinder import utils
from cinder.volume.island.objectstorage import api as objectpool
from cinder.volume.island import snapshot
from cinder.volume.island import volume
from cinder.volume.island.vdisk.qcow2 import api as vdisk
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class SnapshotTestCase(test.TestCase):
"""Test Case for Snapshot."""
def setUp(self):
super(SnapshotTestCase, self).setUp()
self.context = context.RequestContext(user_id='fake',
project_id='fake',
is_admin=False,
read_deleted='no',
overwrite=False)
def _generate_vdisk_path(self, context, vdisk_name):
vdisk_path = FLAGS.local_volume_directory + vdisk_name
return vdisk_path
def _generate_random_string(self, n):
x = string.digits + string.lowercase
        return ''.join(random.sample(x, n))
    def _vdisk_exist(self, context, vdisk_name):
vdisk_path = self._generate_vdisk_path(context, vdisk_name)
return os.path.exists(vdisk_path)
def _write(self, vdisk_name, byte, offset, size):
        vdisk_path = self._generate_vdisk_path(self.context, vdisk_name)
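        # qemu-io "write -P <pattern> <offset> <size>" fills the byte range with the given pattern byte.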
utils.execute('qemu-io',
'-c',
'write -P %s %s %s' % (byte, offset, size),
vdisk_path,
run_as_root=True)
def tearDown(self):
super(SnapshotTestCase, self).tearDown()
def test_snapshot_block(self):
context = self.context
snapshot_name = 'test_snapshot'
block_index = 1111
data = 'a' * FLAGS.cluster_size
block_name = FLAGS.snapshot_block_template % \
(snapshot_name, block_index)
self.assertFalse(objectpool.is_exist(context, block_name))
snapshot._put_snapshot_block(context, snapshot_name, block_index, data)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
md5_str = md5.new(data).hexdigest()
self.assertEqual(meta['checksum'], md5_str)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '0')
data = snapshot._get_snapshot_block(context,
snapshot_name,
block_index)
self.assertEqual(md5_str, md5.new(data).hexdigest())
for i in range(1000):
name = "sp%s" % i
snapshot._link_snapshot_block(context, snapshot_name,
block_index, name)
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['X-refcount'], str(i + 1))
self.assertEqual(meta['X-last-link-snapshot'], name)
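        # Linking the same snapshot name a second time must be idempotent: the refcount stays unchanged.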
snapshot._link_snapshot_block(context, snapshot_name,
block_index, name)
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['X-refcount'], str(i + 1))
self.assertEqual(meta['X-last-link-snapshot'], name)
for i in range(999):
name = "sp%s" % i
snapshot._unlink_snapshot_block(context, snapshot_name,
block_index, name)
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['X-refcount'], str(1000 - i - 1))
self.assertEqual(meta['X-last-unlink-snapshot'], name)
snapshot._unlink_snapshot_block(context, snapshot_name,
block_index, name)
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['X-refcount'], str(1000 - i - 1))
self.assertEqual(meta['X-last-unlink-snapshot'], name)
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['X-refcount'], str(1))
self.assertEqual(meta['X-last-unlink-snapshot'], 'sp998')
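        # One more unlink, from a different snapshot name, drops the refcount to zero and deletes the block.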
snapshot._unlink_snapshot_block(context, snapshot_name,
block_index, 'lastunlink')
self.assertFalse(objectpool.is_exist(context, block_name))
snapshot_name = 'test_snapshot_fordelete'
block_index = 2222
data = 'a' * FLAGS.cluster_size
block_name = FLAGS.snapshot_block_template % \
(snapshot_name, block_index)
self.assertFalse(objectpool.is_exist(context, block_name))
snapshot._put_snapshot_block(context, snapshot_name, block_index, data)
self.assertTrue(objectpool.is_exist(context, block_name))
snapshot._delete_snapshot_block(context, snapshot_name, block_index)
self.assertFalse(objectpool.is_exist(context, block_name))
def test_snapshot_pointer_table(self):
context = self.context
update_blocks = {}
old_pt = {}
for i in xrange(1024*1024):
old_pt[i] = 'old'
if i % 2 == 0:
update_blocks[i] = 'new'
pt = snapshot._make_snapshot_pointer_table(context, 'new',
update_blocks, old_pt)
for i in xrange(1024*1024):
if i % 2 == 0:
self.assertEqual(pt[i], 'new')
else:
self.assertEqual(pt[i], 'old')
pt_name = FLAGS.snapshot_pt_template % 'new'
self.assertFalse(objectpool.is_exist(context, pt_name))
snapshot._put_snapshot_pointer_table(context, 'new', pt)
self.assertTrue(objectpool.is_exist(context, pt_name))
new_pt = snapshot._get_snapshot_pointer_table(context, 'new')
self.assertEqual(len(new_pt), 1024*1024)
for i in xrange(1024*1024):
self.assertEqual(new_pt[i], pt[i])
snapshot._delete_snapshot_pointer_table(context, 'new')
self.assertFalse(objectpool.is_exist(context, pt_name))
def test_snapshot(self):
context = self.context
cluster_size = FLAGS.cluster_size
vdisk_name = 'test_vdisk_name'
        self.assertFalse(self._vdisk_exist(context, vdisk_name))
vdisk.create_vdisk(context, vdisk_name, '1G')
        self.assertTrue(self._vdisk_exist(context, vdisk_name))
vdisk_meta = vdisk.read_vdisk_meta(context, vdisk_name)
self.assertEqual(vdisk_meta['cluster_size'], 2 * 1024 * 1024)
self.assertEqual(vdisk_meta['snapshots_number'], 0)
# 1. Create some snapshots
        # write clusters 0~9 with 'a', then make snapshot01
for i in range(10):
self._write(vdisk_name, '0x61', i * cluster_size, cluster_size)
vdisk.create_snapshot(context, vdisk_name, 'snapshot01')
        # write clusters 10~19 with 'b', then make snapshot02
for i in range(10, 20):
self._write(vdisk_name, '0x62', i * cluster_size, cluster_size)
vdisk.create_snapshot(context, vdisk_name, 'snapshot02')
        # write clusters 20~29 with 'c', then make snapshot03
for i in range(20, 30):
self._write(vdisk_name, '0x63', i * cluster_size, cluster_size)
vdisk.create_snapshot(context, vdisk_name, 'snapshot03')
        # write clusters 500~511 with 'd', then make snapshot04
for i in range(500, 512):
self._write(vdisk_name, '0x64', i * cluster_size, cluster_size)
vdisk.create_snapshot(context, vdisk_name, 'snapshot04')
        # write clusters 0~39 with 'e', then make snapshot05
for i in range(40):
self._write(vdisk_name, '0x65', i * cluster_size, cluster_size)
vdisk.create_snapshot(context, vdisk_name, 'snapshot05')
# 2. Upload snapshots
lastsnapshot_name = None
for i in range(1, 6):
snapshot_name = 'snapshot0%s' % i
pt_name = FLAGS.snapshot_pt_template % snapshot_name
self.assertFalse(objectpool.is_exist(context, pt_name))
snapshot.put_snapshot(context, vdisk_name,
snapshot_name, lastsnapshot_name)
self.assertTrue(objectpool.is_exist(context, pt_name))
lastsnapshot_name = snapshot_name
# 3. Check snapshots' pointer table
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot01')
self.assertEqual(10, len(pt))
for i in range(10):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot01')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot02')
self.assertEqual(20, len(pt))
for i in range(10):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot01')
for i in range(10, 20):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot02')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot03')
self.assertEqual(30, len(pt))
for i in range(10):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot01')
for i in range(10, 20):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot02')
for i in range(20, 30):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot03')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot04')
self.assertEqual(42, len(pt))
for i in range(10):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot01')
for i in range(10, 20):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot02')
for i in range(20, 30):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot03')
for i in range(500, 512):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot04')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot05')
self.assertEqual(52, len(pt))
for i in range(40):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot05')
for i in range(500, 512):
self.assertTrue(i in pt)
self.assertEqual(pt[i], 'snapshot04')
# 4. Check snapshot block refcount
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot01')
self.assertEqual(10, len(pt))
for i in range(10):
block_name = FLAGS.snapshot_block_template % (pt[i], i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '4')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot02')
self.assertEqual(20, len(pt))
for i in range(10, 20):
block_name = FLAGS.snapshot_block_template % (pt[i], i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '3')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot03')
self.assertEqual(30, len(pt))
for i in range(20, 30):
block_name = FLAGS.snapshot_block_template % (pt[i], i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '2')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot04')
self.assertEqual(42, len(pt))
for i in range(500, 512):
block_name = FLAGS.snapshot_block_template % (pt[i], i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '2')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot05')
pt = snapshot._get_snapshot_pointer_table(context, 'snapshot05')
self.assertEqual(52, len(pt))
for i in range(40):
block_name = FLAGS.snapshot_block_template % (pt[i], i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '1')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot05')
# 5. Create new volumes from snapshots
for i in range(1, 6):
volume_name = 'snapshot0%s_vol' % i
snapshot_name = 'snapshot0%s' % i
self.assertFalse(vdisk.vdisk_exist(context, volume_name))
vdisk.create_vdisk(context, volume_name, '1G')
snapshot.get_snapshot(context, snapshot_name, volume_name)
self.assertTrue(vdisk.vdisk_exist(context, volume_name))
# 6. Check new volumes data
# check snapshot01_vol
vdisk_pointer_table = vdisk.get_vdisk_pointer_table(context,
'snapshot01_vol')
vdisk_path = self._generate_vdisk_path(context, 'snapshot01_vol')
self.assertEqual(10, len(vdisk_pointer_table))
fd = open(vdisk_path, 'rb')
for i in range(10):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'a')
fd.close()
#check snapshot02_vol
vdisk_pointer_table = vdisk.get_vdisk_pointer_table(context,
'snapshot02_vol')
vdisk_path = self._generate_vdisk_path(context, 'snapshot02_vol')
self.assertEqual(20, len(vdisk_pointer_table))
fd = open(vdisk_path, 'rb')
for i in range(10):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'a')
for i in range(10, 20):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'b')
fd.close()
#check snapshot03_vol
vdisk_pointer_table = vdisk.get_vdisk_pointer_table(context,
'snapshot03_vol')
vdisk_path = self._generate_vdisk_path(context, 'snapshot03_vol')
self.assertEqual(30, len(vdisk_pointer_table))
fd = open(vdisk_path, 'rb')
for i in range(10):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'a')
for i in range(10, 20):
            self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'b')
for i in range(20, 30):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'c')
fd.close()
#check snapshot04_vol
vdisk_pointer_table = vdisk.get_vdisk_pointer_table(context,
'snapshot04_vol')
vdisk_path = self._generate_vdisk_path(context, 'snapshot04_vol')
self.assertEqual(42, len(vdisk_pointer_table))
fd = open(vdisk_path, 'rb')
for i in range(10):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'a')
for i in range(10, 20):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'b')
for i in range(20, 30):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'c')
for i in range(500, 512):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'd')
fd.close()
#check snapshot05_vol
vdisk_pointer_table = vdisk.get_vdisk_pointer_table(context,
'snapshot05_vol')
vdisk_path = self._generate_vdisk_path(context, 'snapshot05_vol')
self.assertEqual(52, len(vdisk_pointer_table))
fd = open(vdisk_path, 'rb')
for i in range(40):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'e')
for i in range(500, 512):
self.assertTrue(i in vdisk_pointer_table)
fd.seek(vdisk_pointer_table[i], 0)
data = fd.read(1)
self.assertEqual(data, 'd')
fd.close()
# 7. Delete snapshots and Check snapshot block refcount
# delete snapshot01
snapshot.delete_snapshot(context, 'snapshot01')
pt_name = FLAGS.snapshot_pt_template % 'snapshot01'
self.assertFalse(objectpool.is_exist(context, pt_name))
for i in range(10):
block_name = FLAGS.snapshot_block_template % ('snapshot01', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '3')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot01')
# delete snapshot02
snapshot.delete_snapshot(context, 'snapshot02')
pt_name = FLAGS.snapshot_pt_template % 'snapshot02'
self.assertFalse(objectpool.is_exist(context, pt_name))
for i in range(10):
block_name = FLAGS.snapshot_block_template % ('snapshot01', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '2')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot02')
for i in range(10, 20):
block_name = FLAGS.snapshot_block_template % ('snapshot02', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '2')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot02')
# delete snapshot03
snapshot.delete_snapshot(context, 'snapshot03')
pt_name = FLAGS.snapshot_pt_template % 'snapshot03'
self.assertFalse(objectpool.is_exist(context, pt_name))
for i in range(10):
block_name = FLAGS.snapshot_block_template % ('snapshot01', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '1')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot03')
for i in range(10, 20):
block_name = FLAGS.snapshot_block_template % ('snapshot02', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '1')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot03')
for i in range(20, 30):
block_name = FLAGS.snapshot_block_template % ('snapshot03', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '1')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot04')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot03')
# delete snapshot04
snapshot.delete_snapshot(context, 'snapshot04')
pt_name = FLAGS.snapshot_pt_template % 'snapshot04'
self.assertFalse(objectpool.is_exist(context, pt_name))
for i in range(10):
block_name = FLAGS.snapshot_block_template % ('snapshot01', i)
self.assertFalse(objectpool.is_exist(context, block_name))
for i in range(10, 20):
block_name = FLAGS.snapshot_block_template % ('snapshot02', i)
self.assertFalse(objectpool.is_exist(context, block_name))
for i in range(20, 30):
block_name = FLAGS.snapshot_block_template % ('snapshot03', i)
self.assertFalse(objectpool.is_exist(context, block_name))
for i in range(500, 512):
block_name = FLAGS.snapshot_block_template % ('snapshot04', i)
self.assertTrue(objectpool.is_exist(context, block_name))
meta = objectpool.get_object_meta(context, block_name)
self.assertEqual(meta['size'], str(FLAGS.cluster_size))
self.assertEqual(meta['X-refcount'], '1')
self.assertEqual(meta['X-last-link-snapshot'], 'snapshot05')
self.assertEqual(meta['X-last-unlink-snapshot'], 'snapshot04')
# delete snapshot05
snapshot.delete_snapshot(context, 'snapshot05')
pt_name = FLAGS.snapshot_pt_template % 'snapshot05'
self.assertFalse(objectpool.is_exist(context, pt_name))
for i in range(40):
block_name = FLAGS.snapshot_block_template % ('snapshot05', i)
self.assertFalse(objectpool.is_exist(context, block_name))
for i in range(500, 512):
block_name = FLAGS.snapshot_block_template % ('snapshot04', i)
self.assertFalse(objectpool.is_exist(context, block_name))
# 8. Delete new volumes
for i in range(1, 6):
volume_name = 'snapshot0%s_vol' % i
vdisk.delete_vdisk(context, volume_name)
self.assertFalse(vdisk.vdisk_exist(context, volume_name))
        # 9. Delete the source volume
        vdisk.delete_vdisk(context, vdisk_name)
self.assertFalse(vdisk.vdisk_exist(context, vdisk_name))
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import json
import uuid
import bson.json_util as bju
import os
# Our imports
import emission.core.get_database as edb
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
import emission.core.wrapper.pipelinestate as ecwp
import emission.analysis.intake.segmentation.trip_segmentation_methods.dwell_segmentation_time_filter as dstf
import emission.analysis.intake.segmentation.trip_segmentation_methods.dwell_segmentation_dist_filter as dsdf
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
import emission.storage.decorations.place_queries as esdp
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
# Test imports
import emission.tests.common as etc
class TestTripSegmentation(unittest.TestCase):
def setUp(self):
self.analysis_conf_path = \
etc.set_analysis_config("intake.cleaning.filter_accuracy.enable", True)
etc.setupRealExample(self, "emission/tests/data/real_examples/shankari_2015-aug-27")
self.androidUUID = self.testUUID
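        # Load a second (iOS) dataset under a fixed UUID so that android and
        # iOS segmentation can be exercised independently in the tests below.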
self.testUUID = uuid.UUID("c76a0487-7e5a-3b17-a449-47be666b36f6")
with open("emission/tests/data/real_examples/iphone_2015-11-06") as fp:
self.entries = json.load(fp, object_hook = bju.object_hook)
etc.setupRealExampleWithEntries(self)
self.iosUUID = self.testUUID
eaicf.filter_accuracy(self.iosUUID)
logging.debug("androidUUID = %s, iosUUID = %s" % (self.androidUUID, self.iosUUID))
def tearDown(self):
os.remove(self.analysis_conf_path)
edb.get_timeseries_db().delete_many({"user_id": self.androidUUID})
edb.get_timeseries_db().delete_many({"user_id": self.iosUUID})
edb.get_pipeline_state_db().delete_many({"user_id": self.androidUUID})
edb.get_pipeline_state_db().delete_many({"user_id": self.iosUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.androidUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.iosUUID})
def testEmptyCall(self):
import uuid
dummyUserId = uuid.uuid4()
# We just expect that this won't raise an exception
eaist.segment_current_trips(dummyUserId)
def testSegmentationPointsDwellSegmentationTimeFilter(self):
ts = esta.TimeSeries.get_time_series(self.androidUUID)
tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
dstfsm = dstf.DwellSegmentationTimeFilter(time_threshold = 5 * 60, # 5 mins
point_threshold = 10,
distance_threshold = 100) # 100 m
segmentation_points = dstfsm.segment_into_trips(ts, tq)
for (start, end) in segmentation_points:
logging.debug("trip is from %s (%f) -> %s (%f)" % (start.fmt_time, start.ts, end.fmt_time, end.ts))
self.assertIsNotNone(segmentation_points)
self.assertEqual(len(segmentation_points), 8)
self.assertEqual([start.ts for (start, end) in segmentation_points],
[1440688739.672, 1440689662.943, 1440690718.768, 1440695152.989,
1440699933.687, 1440716367.376, 1440720239.012, 1440728519.971])
self.assertEqual([end.ts for (start, end) in segmentation_points],
[1440689408.302, 1440690108.678, 1440694424.894, 1440699298.535,
1440700070.129, 1440719699.470, 1440723334.898, 1440729184.411])
def testSegmentationPointsDwellSegmentationDistFilter(self):
ts = esta.TimeSeries.get_time_series(self.iosUUID)
tq = estt.TimeQuery("metadata.write_ts", 1446796800, 1446847600)
        dstdsm = dsdf.DwellSegmentationDistFilter(time_threshold = 10 * 60, # 10 mins
point_threshold = 10,
distance_threshold = 100) # 100 m
segmentation_points = dstdsm.segment_into_trips(ts, tq)
for (start, end) in segmentation_points:
logging.debug("trip is from %s (%f) -> %s (%f)" % (start.fmt_time, start.ts, end.fmt_time, end.ts))
self.assertIsNotNone(segmentation_points)
self.assertEqual(len(segmentation_points), 2)
self.assertEqual([start.ts for (start, end) in segmentation_points],
[1446797042.282652, 1446821561.559255])
self.assertEqual([end.ts for (start, end) in segmentation_points],
[1446797923.682973, 1446828217.125328])
def testSegmentationWrapperAndroid(self):
eaist.segment_current_trips(self.androidUUID)
# The previous line should have created places and trips and stored
# them into the database. Now, we want to query to ensure that they
# were created correctly.
tq_place = estt.TimeQuery("data.enter_ts", 1440658800, 1440745200)
created_places_entries = esda.get_entries(esda.RAW_PLACE_KEY,
self.androidUUID, tq_place)
tq_trip = estt.TimeQuery("data.start_ts", 1440658800, 1440745200)
created_trips_entries = esda.get_entries(esda.RAW_TRIP_KEY,
self.androidUUID, tq_trip)
for i, place in enumerate(created_places_entries):
logging.debug("Retrieved places %s: %s -> %s" % (i, place.data.enter_fmt_time, place.data.exit_fmt_time))
for i, trip in enumerate(created_trips_entries):
logging.debug("Retrieved trips %s: %s -> %s" % (i, trip.data.start_fmt_time, trip.data.end_fmt_time))
        # We expect there to be 9 places, but the first one is the start of
# the chain, so it has a start_time of None and it won't be retrieved
# by the query on the start_time that we show here.
self.assertEqual(len(created_places_entries), 9)
self.assertEqual(len(created_trips_entries), 8)
# Pick the first two trips and the first place and ensure that they are all linked correctly
# Note that this is the first place, not the second place because the true first place will not
# be retrieved by the query, as shown above
trip0 = created_trips_entries[0]
trip1 = created_trips_entries[1]
place0 = created_places_entries[0]
self.assertEqual(trip0.data.end_place, place0.get_id())
self.assertEqual(trip1.data.start_place, place0.get_id())
self.assertEqual(place0.data.ending_trip, trip0.get_id())
self.assertEqual(place0.data.starting_trip, trip1.get_id())
self.assertEqual(round(trip0.data.duration), 11 * 60 + 9)
self.assertEqual(round(trip1.data.duration), 6 * 60 + 54)
self.assertIsNotNone(place0.data.location)
def testSegmentationWrapperIOS(self):
eaist.segment_current_trips(self.iosUUID)
# The previous line should have created places and trips and stored
# them into the database. Now, we want to query to ensure that they
# were created correctly.
tq_place = estt.TimeQuery("data.enter_ts", 1446796800, 1446847600)
created_places_entries = esda.get_entries(esda.RAW_PLACE_KEY,
self.iosUUID, tq_place)
tq_trip = estt.TimeQuery("data.start_ts", 1446796800, 1446847600)
created_trips_entries = esda.get_entries(esda.RAW_TRIP_KEY,
self.iosUUID, tq_trip)
for i, place in enumerate(created_places_entries):
logging.debug("Retrieved places %s: %s -> %s" % (i, place.data.enter_fmt_time, place.data.exit_fmt_time))
for i, trip in enumerate(created_trips_entries):
logging.debug("Retrieved trips %s: %s -> %s" % (i, trip.data.start_fmt_time, trip.data.end_fmt_time))
        # We expect there to be 4 places, but the first one is the start of
# the chain, so it has a start_time of None and it won't be retrieved
# by the query on the start_time that we show here.
self.assertEqual(len(created_places_entries), 2)
self.assertEqual(len(created_trips_entries), 2)
# Pick the first two trips and the first place and ensure that they are all linked correctly
# Note that this is the first place, not the second place because the true first place will not
# be retrieved by the query, as shown above
# The first trip here is a dummy trip, so let's check the second and third trip instead
trip0 = created_trips_entries[0]
trip1 = created_trips_entries[1]
place0 = created_places_entries[0]
self.assertEqual(trip0.data.end_place, place0.get_id())
self.assertEqual(trip1.data.start_place, place0.get_id())
self.assertEqual(place0.data.ending_trip, trip0.get_id())
self.assertEqual(place0.data.starting_trip, trip1.get_id())
self.assertEqual(round(trip0.data.duration), 14 * 60 + 41)
self.assertEqual(round(trip1.data.duration), 1 * 60 * 60 + 50 * 60 + 56)
self.assertIsNotNone(place0.data.location)
def testSegmentationWrapperCombined(self):
# Change iOS entries to have the android UUID
tsdb = edb.get_timeseries_db()
for entry in esta.TimeSeries.get_time_series(
self.iosUUID).find_entries():
entry["user_id"] = self.androidUUID
edb.save(tsdb, entry)
# Now, segment the data for the combined UUID, which will include both
# android and ios
eaist.segment_current_trips(self.androidUUID)
tq_place = estt.TimeQuery("data.enter_ts", 1440658800, 1446847600)
created_places_entries = esda.get_entries(esda.RAW_PLACE_KEY,
self.androidUUID, tq_place)
tq_trip = estt.TimeQuery("data.start_ts", 1440658800, 1446847600)
created_trips_entries = esda.get_entries(esda.RAW_TRIP_KEY,
self.androidUUID, tq_trip,
untracked_key=esda.RAW_UNTRACKED_KEY)
for i, place in enumerate(created_places_entries):
logging.debug("Retrieved places %s: %s -> %s" % (i, place.data.enter_fmt_time, place.data.exit_fmt_time))
for i, trip in enumerate(created_trips_entries):
logging.debug("Retrieved trips %s: %s -> %s" % (i, trip.data.start_fmt_time, trip.data.end_fmt_time))
        # We expect there to be 12 places, but the first one is the start of
# the chain, so it has a start_time of None and it won't be retrieved
# by the query on the start_time that we show here.
self.assertEqual(len(created_places_entries), 11)
self.assertEqual(len(created_trips_entries), 11)
# Pick the first two trips and the first place and ensure that they are all linked correctly
# Note that this is the first place, not the second place because the true first place will not
# be retrieved by the query, as shown above
# The first trip here is a dummy trip, so let's check the second and third trip instead
trip0time = created_trips_entries[0]
trip1time = created_trips_entries[1]
place0time = created_places_entries[0]
self.assertEqual(trip0time.data.end_place, place0time.get_id())
self.assertEqual(trip1time.data.start_place, place0time.get_id())
self.assertEqual(place0time.data.ending_trip, trip0time.get_id())
self.assertEqual(place0time.data.starting_trip, trip1time.get_id())
self.assertEqual(round(trip0time.data.duration), 11 * 60 + 9)
self.assertEqual(round(trip1time.data.duration), 6 * 60 + 54)
self.assertIsNotNone(place0time.data.location)
# There are 9 android "trips" first (index: 0-8), including the untracked time
# index 9 is the short, bogus trip
# So we want to check trips 10 and 11
trip0dist = created_trips_entries[9]
trip1dist = created_trips_entries[10]
place0dist = created_places_entries[9]
self.assertEqual(trip0dist.data.end_place, place0dist.get_id())
self.assertEqual(trip1dist.data.start_place, place0dist.get_id())
self.assertEqual(place0dist.data.ending_trip, trip0dist.get_id())
self.assertEqual(place0dist.data.starting_trip, trip1dist.get_id())
self.assertEqual(round(trip0dist.data.duration), 14 * 60 + 41)
self.assertEqual(round(trip1dist.data.duration), 1 * 60 * 60 + 50 * 60 + 56)
self.assertIsNotNone(place0dist.data.location)
if __name__ == '__main__':
etc.configLogging()
unittest.main()
|
|
"""
.. module:: fhmm
:platform: Unix
   :synopsis: Contains methods for training and fitting Factorial HMMs.
.. moduleauthor:: Phil Ngo <ngo.phil@gmail.com>
.. moduleauthor:: Miguel Perez <miguel.a.perez4@gmail.com>
.. moduleauthor:: Stephen Suffian <stephen.suffian@gmail.com>
.. moduleauthor:: Sabina Tomkins <sabina.tomkins@gmail.com>
"""
from hmmlearn import hmm
import utils
from copy import deepcopy
import numpy as np
import pandas as pd
from collections import OrderedDict
import itertools
import matplotlib.pyplot as plt
import json
def init_HMM(pi_prior,a_prior,mean_prior,cov_prior):
'''
    Initializes a Gaussian HMM from the given priors.
pi_prior is the starting probability of the HMM
a_prior is the transition matrix of the HMM
mean_prior is the initial mean value of each state
cov_prior is the initial covariance of each state
For an n-state HMM:
* pi_prior is a 1-D numpy array of size n
* a_prior is a 2-D numpy array of size n x n
* mean_prior is an numpy array of size n
* cov_prior is a 3-D numpy array that has been tiled into two rows,
one column, and n third dimensional states.
* ex) np.tile(1,(2,1,n)) for uniform covariance to start with.
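    Illustrative example (for clarity only, mirroring get_simple_fhmm below)
    of building the priors for a 2-state on/off device; it assumes the legacy
    hmmlearn API used throughout this module:
        pi_prior = np.array([0.9, 0.1])
        a_prior = np.array([[0.95, 0.05], [0.05, 0.95]])
        mean_prior = np.array([0, 200])[:, np.newaxis]
        cov_prior = np.array([1, 1])[:, np.newaxis, np.newaxis]
        model = init_HMM(pi_prior, a_prior, mean_prior, cov_prior)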
'''
model = hmm.GaussianHMM(pi_prior.size,'full',pi_prior,a_prior)
model.means_ = mean_prior
model.covars_ = cov_prior
return model
def fit_trace_to_HMM(model,trace):
'''
Fits the given trace to the model. NaNs are turned into zeroes.
'''
trace_values = utils.trace_series_to_numpy_array(trace.series)
model.fit([trace_values])
startprob, means, covars, transmat = _sort_learnt_parameters(model.startprob_,
model.means_, model.covars_ , model.transmat_)
model=hmm.GaussianHMM(startprob.size, 'full', startprob, transmat)
model.means_ = means
model.covars_ = covars
return model
def fit_instance_to_HMM(model,instance):
'''
Fits the given instance to the model. NaNs are turned into zeroes.
'''
for trace in instance.traces:
model=fit_trace_to_HMM(model,trace)
return model
def generate_HMMs_from_type(type,pi_prior,a_prior,
mean_prior,cov_prior,key_for_model_name=None):
'''
Generates a dictionary of HMMs using each instance of given type.
The key to the dictionary is defined by the parameter 'key_for_model_name'
    which looks at each instance's metadata and uses the value from that key
in order to name the model. If no key is given, the model is named based on
its index.
'''
instance_models=OrderedDict()
for i,instance in enumerate(type.instances):
if(key_for_model_name):
instance_name=instance.traces[0].metadata[key_for_model_name]
else:
instance_name=i
instance_models[instance_name]=init_HMM(pi_prior,a_prior,mean_prior,cov_prior)
instance_models[instance_name]=fit_instance_to_HMM(instance_models[instance_name],
instance)
return instance_models
def generate_FHMM_from_HMMs(type_models):
'''
Takes a dictionary of models, where the keys are the device type name, and
generates an FHMM of these models. It returns the fhmm model as well as
a dictionary with the key being device type and each value being a list
containing the means for each state of that device type.
'''
list_pi=[]
list_A=[]
list_means=[]
means={}
variances={}
for device_type_name in type_models:
list_pi.append(type_models[device_type_name].startprob_)
list_A.append(type_models[device_type_name].transmat_)
list_means.append(type_models[device_type_name].means_.flatten().
tolist())
means[device_type_name]=type_models[device_type_name].means_
variances[device_type_name]=type_models[device_type_name].covars_
pi_combined=_compute_pi_fhmm(list_pi)
A_combined=_compute_A_fhmm(list_A)
[mean_combined, cov_combined]=_compute_means_fhmm(list_means)
model_fhmm=_create_combined_hmm(len(pi_combined),pi_combined,
A_combined, mean_combined, cov_combined)
return model_fhmm,means,variances
def predict_with_FHMM(model_fhmm,means,variances,power_total):
'''
    Predicts the decoded states and power for the given test data with the
given FHMM. test_data is a dictionary containing keys for each device
that is in the FHMM.
'''
learnt_states=model_fhmm.predict(power_total)
[_decoded_states,_decoded_power]=_decode_hmm(len(learnt_states), means,
variances, means.keys(), learnt_states)
np.putmask(_decoded_power['air1'],_decoded_power['air1'] >= power_total.T,
power_total.T)
return _decoded_states,_decoded_power
def predict_with_FHMM_temp(model_fhmm,means,variances,power_temp_total):
'''
    Predicts the decoded states and power for the given test data with the
given FHMM. test_data is a dictionary containing keys for each device
that is in the FHMM.
'''
    learnt_states=model_fhmm.predict(power_temp_total)
    [_decoded_states,_decoded_power]=_decode_hmm(len(learnt_states), means,
            variances, means.keys(), learnt_states)
    np.putmask(_decoded_power['air1'],_decoded_power['air1'] >= power_temp_total.T,
            power_temp_total.T)
return _decoded_states,_decoded_power
def plot_FHMM_and_predictions(test_data,_decoded_power):
'''
This plots the actual and predicted power based on the FHMM.
'''
for i,device_type in enumerate(test_data):
        if device_type != 'use':
plt.figure()
plt.plot(test_data[device_type],'g')
plt.plot(_decoded_power[device_type],'b')
plt.title('Ground Truth (Green) and Predicted (Blue) for %s' %device_type)
plt.ylabel('Power (W)')
plt.xlabel('Time')
plt.ylim((np.min(test_data[device_type])-10, np.max(test_data[device_type])+10))
plt.tight_layout()
def get_best_instance_model(instance_models,device_type,key_for_model_name):
dfs_model = {}
best_model_score = 0
for model_name in instance_models:
instances_of_model = []
for instance in device_type.instances:
test_trace = instance.traces[0]
instance_name = test_trace.metadata[key_for_model_name]
test = utils.trace_series_to_numpy_array(test_trace.series)
model_score = instance_models[model_name].score(test)
instances_of_model.append([model_name,instance_name,model_score])
if(model_score > best_model_score):
best_model = instance_models[model_name]
dfs_model[model_name] = pd.DataFrame(data=instances_of_model,
columns=['Model_Instance','Test_Instance','Value'])
model_averages = []
for key in dfs_model:
sum=0
for row in dfs_model[key].iterrows():
sum = sum+row[1]['Value']
model_averages.append([key,sum/len(dfs_model[key].index)])
print
avg_model_df = pd.DataFrame(data=model_averages,
columns=['Model_Instance','Avg Probability'])
    print avg_model_df.sort_values('Avg Probability', ascending=False)
    bestModel = avg_model_df.sort_values('Avg Probability',
            ascending=False).head(1)['Model_Instance'].values[0]
    print str(bestModel) + ' is best.'
return bestModel
def _sort_startprob(mapping, startprob):
'''
    Sorts the startprob of the HMM according to power means, as returned by mapping.
'''
num_elements = len(startprob)
new_startprob = np.zeros(num_elements)
for i in xrange(len(startprob)):
new_startprob[i] = startprob[mapping[i]]
return new_startprob
def _sort_covars(mapping, covars):
num_elements = len(covars)
new_covars = np.zeros_like(covars)
for i in xrange(len(covars)):
new_covars[i] = covars[mapping[i]]
return new_covars
def _sort_transition_matrix(mapping, A):
'''
sorts the transition matrix of the HMM according to power means; as returned by mapping
'''
num_elements = len(A)
A_new = np.zeros((num_elements, num_elements))
for i in range(num_elements):
for j in range(num_elements):
A_new[i,j] = A[mapping[i], mapping[j]]
return A_new
def _return_sorting_mapping(means):
means_copy = deepcopy(means)
    # Sort the means in increasing order
means_copy = np.sort(means_copy, axis = 0)
# Finding mapping
mapping = {}
mapping_set=set()
x=0
for i, val in enumerate(means_copy):
x= np.where(val==means)[0]
for val in x:
if val not in mapping_set:
mapping_set.add(val)
mapping[i]=val
break
return mapping
def _sort_learnt_parameters(startprob, means, covars, transmat):
'''
sorts the learnt parameters for the HMM
'''
mapping = _return_sorting_mapping(means)
means_new = np.sort(means, axis = 0)
startprob_new = _sort_startprob(mapping, startprob)
covars_new = _sort_covars(mapping, covars)
transmat_new = _sort_transition_matrix(mapping, transmat)
assert np.shape(means_new) == np.shape(means)
assert np.shape(startprob_new) == np.shape(startprob)
assert np.shape(transmat_new) == np.shape(transmat)
return [startprob_new, means_new, covars_new, transmat_new]
def _compute_pi_fhmm(list_pi):
'''
Input: list_pi: List of PI's of individual learnt HMMs
Output: Combined Pi for the FHMM
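    Illustrative example (for clarity only): for two 2-state HMMs the combined
    starting distribution is the Kronecker product of the individual ones,
    and _compute_A_fhmm combines transition matrices analogously:
        _compute_pi_fhmm([np.array([0.5, 0.5]), np.array([0.9, 0.1])])
        # -> array([0.45, 0.05, 0.45, 0.05])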
'''
result=list_pi[0]
for i in range(len(list_pi)-1):
result=np.kron(result,list_pi[i+1])
return result
def _compute_A_fhmm(list_A):
'''
    Input: list_A: List of transition matrices of the individual learnt HMMs
    Output: Combined transition matrix for the FHMM
'''
result=list_A[0]
for i in range(len(list_A)-1):
result=np.kron(result,list_A[i+1])
return result
def _compute_means_fhmm(list_means):
'''
Returns [mu, sigma]
'''
#list_of_appliances_centroids=[ [appliance[i][0] for i in range(len(appliance))] for appliance in list_B]
states_combination=list(itertools.product(*list_means))
num_combinations=len(states_combination)
means_stacked=np.array([sum(x) for x in states_combination])
means=np.reshape(means_stacked,(num_combinations,1))
cov=np.tile(5*np.identity(1), (num_combinations, 1, 1))
return [means, cov]
def _create_combined_hmm(n, pi, A, mean, cov):
combined_model=hmm.GaussianHMM(n_components=n,covariance_type='full', startprob=pi, transmat=A)
combined_model.covars_=cov
combined_model.means_=mean
return combined_model
def _decode_hmm(length_sequence, centroids, variance, appliance_list, states):
'''
decodes the HMM state sequence
'''
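    # The combined state index behaves like a mixed-radix number: each
    # appliance contributes one "digit" whose base is its number of states,
    # so repeated integer division and modulo recover the per-appliance states.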
power_states_dict={}
hmm_states={}
hmm_power={}
total_num_combinations=1
for appliance in appliance_list:
total_num_combinations*=len(centroids[appliance])
for appliance in appliance_list:
hmm_states[appliance]=np.zeros(length_sequence,dtype=np.int)
hmm_power[appliance]=np.zeros(length_sequence)
for i in range(length_sequence):
factor=total_num_combinations
for appliance in appliance_list:
            # use floor division so the mixed-radix decoding also works under Python 3
            factor=factor//len(centroids[appliance])
            temp=int(states[i])//factor
hmm_states[appliance][i]=temp%len(centroids[appliance])
mu=centroids[appliance]
sigma=variance[appliance]
hmm_power[appliance][i]=np.array([0,np.random.normal(mu[1],sigma[1],
1)[0]]).reshape(2,1)[hmm_states[appliance][i]]
return [hmm_states,hmm_power]
def disaggregate_data(model_tuple, trace):
data=[]
power_total=utils.trace_series_to_numpy_array(trace.series)
[decoded_states, decoded_power]=predict_with_FHMM(model_tuple[0],
model_tuple[1],model_tuple[2],power_total)
for i,v in enumerate(decoded_power['air1']):
date_time=trace.series.index[i]
value=trace.series[i]
data.append({'date':date_time.strftime('%Y-%m-%d %H:%M'),
'dg': float(v),'reading':float(value)})
json_string = json.dumps(data, ensure_ascii=False,indent=4,
separators=(',', ': '))
return json_string
def get_simple_fhmm(means,ons,offs,pis,covs_on,covs_off):
hmms = {}
for i,(mean,on,off,cov_on,cov_off,pi) in enumerate(zip(means,ons,offs,covs_on,covs_off,pis)):
pi_prior = np.array([1 - pi,pi])
a_prior = np.array([[off, 1 - off],[1 - on,on]])
mean_prior = np.array([0,mean])[:,np.newaxis]
cov_prior = np.array([cov_on,cov_off])[:,np.newaxis,np.newaxis]
hmms["device_{}".format(i)] = init_HMM(pi_prior,a_prior,mean_prior,cov_prior)
appliance_hmm,_,_ = generate_FHMM_from_HMMs(hmms)
return appliance_hmm
def get_states(individual_means,appliance_fhmm,use):
states = appliance_fhmm.predict(use)
combinations = _get_combinations(individual_means.shape[0])
state_means = []
for combo in combinations:
state_means.append(np.sum(individual_means * combo))
decoded_state_key = sorted(zip(state_means,combinations), key = lambda x: x[0])
decoded_states = [decoded_state_key[state][1] for state in states]
return np.array(decoded_states)
def _get_combinations(n):
combos = []
for i in range(2**n):
combo = []
for j in range(n-1,-1,-1):
combo.append(int(2**j<=i))
if 2**j <= i:
i = i - 2**j
combos.append(combo)
return np.array(combos)
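# Example (illustrative only): _get_combinations(2) enumerates the on/off
# state combinations for two devices in binary counting order:
#     array([[0, 0],
#            [0, 1],
#            [1, 0],
#            [1, 1]])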
|
|
#!/usr/bin/env python
"""Start CockroachDB.
CockroachDB clusters need to be bootstrapped.
This is done by starting the very first node without the
--join=<ip1,ip2,...,ipN> parameter. Once bootstrapped, no node must
ever be started without the --join parameter again; doing so would
initialize a new cluster causing the old cluster to be effectively
discarded.
This poses an interesting problem for us as it means we need to know whether a
cluster has been bootstrapped before, from any of the masters in the cluster.
Additionally, once a cluster has been bootstrapped by starting a node in this
"initial master mode" all subsequent nodes need to be started with one or more
peer IP addresses provided to them via the --join=<ip1,ip2,...,ipN> parameter.
As this list of IPs is used for discovery through the gossip protocol, not all
the provided IP addresses actually need to be up or reachable (that would
introduce a chicken and egg problem, anyway.) An example bootstrap sequence
would be:
node1:
./cockroach
node2:
./cockroach --join=node1
node3:
./cockroach --join=node1,node2
Then, after any crashes or server reboots, any of these nodes can be started
with the following command and they will discover one another:
./cockroach --join=node1,node2,node3
Here we have used the hostname of the nodes (node1, node2, etc.) but for DC/OS
we would use the internal IP addresses of the master nodes instead.
CockroachDB also supports a --pid-file parameter which writes the PID to
a file once the server is ready to serve requests.
The bootstrap and discovery strategy we designed is as follows:
1. Connect to ZooKeeper.
2. Determine whether the cluster has already been initialized by
checking whether the list of IPs at `ZK_NODES_PATH` exists. This
does not require the lock to be held as nodes can only ever be
added, never removed: if the list of IPs at `ZK_NODES_PATH` is
non-empty, we know the cluster has been bootstrapped.
3. If the list is empty:
3.1 Take and hold the ZK lock.
3.2 Check the `ZK_NODES_PATH` again to ensure the value hasn't been
updated since we checked it in step 2.
    3.3 If it is now non-empty, go to step 4 as the cluster has since been initialized.
3.4 If it is still empty, we need to bootstrap the cluster.
3.5 Start CockroachDB without the --join=... parameter to initialize
the new cluster. Stop it again.
3.6 Add the current node's IP address to the list at `ZK_NODES_PATH`.
    3.7 Release the lock, write the list of nodes to `NODES_FILE_PATH` and exit 0.
4. If `ZK_NODES_PATH` is non-empty:
4.1 If our IP is not yet in the list, briefly take the ZK lock and add
our IP to ZK. Release the lock.
    4.2 Write the list of node IPs to `NODES_FILE_PATH` and exit 0.
    4.3 Exit 0. The cockroach.sh script will exec the cockroach service with
        the --join parameter taken from `NODES_FILE_PATH`.
See
https://jira.mesosphere.com/browse/DCOS-16183 and then
https://jira.mesosphere.com/browse/DCOS-17886 and then
https://jira.mesosphere.com/browse/DCOS-17325
Note that for long-running processes using Kazoo and especially Kazoo's lock
recipe it is recommended to add a connection state change event handler that
takes care of communicating the current connection state to the rest of the
application so that it can respond to it (which enables e.g. delayed lock
release). This process here, however, is shortlived. Errors that occur during
ZooKeeper interaction lead to an application crash. In that case (when this
program exits with a non-zero exit code) the outer systemd wrapper makes sure
that potentially orphaned child processes (CockroachDB!) are killed and reaped.
"""
import json
import logging
import os
import pwd
import socket
import subprocess
from contextlib import contextmanager
from typing import Any, Generator, List, Optional
import requests
import retrying
from kazoo.client import KazooClient
from kazoo.exceptions import (
ConnectionLoss,
LockTimeout,
SessionExpiredError,
)
from kazoo.retry import KazooRetry
from kazoo.security import make_digest_acl
from requests import ConnectionError, HTTPError, Timeout
from dcos_internal_utils import utils
log = logging.getLogger(__name__)
def zk_connect(zk_user: Optional[str] = None, zk_secret: Optional[str] = None) -> KazooClient:
"""Connect to ZooKeeper.
On connection failure, the function attempts to reconnect indefinitely with exponential backoff
up to 3 seconds. If a command fails, that command is retried every 300ms for 3 attempts before failing.
    These values are chosen to suit human-interactive time scales.
Args:
zk_user:
The username to use when connecting to ZooKeeper or `None` if no authentication is necessary.
zk_secret:
The secret to use when connecting to ZooKeeper or `None` if no authentication is necessary.
Returns:
A ZooKeeper client connection in the form of a `kazoo.client.KazooClient`.
"""
    # Try to reconnect indefinitely, with the delay between attempts growing
    # exponentially until it caps at ~3 seconds; after that, retries occur
    # roughly every 3 seconds.
conn_retry_policy = KazooRetry(
max_tries=-1,
delay=0.3,
backoff=1.3,
max_jitter=1,
max_delay=3,
ignore_expire=True,
)
# Retry commands every 0.3 seconds, for a total of <1s (usually 0.9)
cmd_retry_policy = KazooRetry(
max_tries=3,
delay=0.3,
backoff=1,
max_jitter=0.1,
max_delay=1,
ignore_expire=False,
)
default_acl = None
auth_data = None
if zk_user and zk_secret:
default_acl = [make_digest_acl(zk_user, zk_secret, all=True)]
scheme = 'digest'
credential = "{}:{}".format(zk_user, zk_secret)
auth_data = [(scheme, credential)]
zk = KazooClient(
hosts="127.0.0.1:2181",
timeout=30,
connection_retry=conn_retry_policy,
command_retry=cmd_retry_policy,
default_acl=default_acl,
auth_data=auth_data,
)
zk.start()
return zk
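# Example usage (illustrative only): an unauthenticated connection to the
# local ZooKeeper instance.
#     zk = zk_connect()
#     ...
#     zk.stop()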
# The prefix used for cockroachdb in ZK.
ZK_PATH = "/cockroach"
# The path of the ZNode used for locking.
ZK_LOCK_PATH = ZK_PATH + "/lock"
# The path of the ZNode containing the list of cluster members.
ZK_NODES_PATH = ZK_PATH + "/nodes"
# The id to use when contending for the ZK lock.
LOCK_CONTENDER_ID = "{hostname}:{pid}".format(
hostname=socket.gethostname(),
pid=os.getpid(),
)
# The path to the CockroachDB PID file.
PID_FILE_PATH = '/run/dcos/cockroach/cockroach.pid'
# The path to the file containing the list of nodes in the cluster as a
# comma-separated list of IPs.
NODES_FILE_PATH = '/run/dcos/cockroach/nodes'
# The time in seconds to wait when attempting to acquire a lock. Lock
# acquisition between 5 ZooKeeper nodes is an operation on the order
# of milliseconds.
#
# Furthermore, the operations performed while the lock is held are
# performed once and never again. This means a process will only
# contend for the lock once. As such, if lock acquisition fails due to
# some other process holding it, the current process will crash and be
# restarted with one less contender for the same lock. This means that
# the locking behaviour does converge and no timeout-sensitive
# livelock can occur.
#
# We set the lock timeout to a couple of seconds instead of
# milliseconds to account for variation in network latency between
# nodes in the cluster. The current value has so far shown to be
# sufficient.
ZK_LOCK_TIMEOUT = 5
@contextmanager
def _zk_lock(zk: KazooClient, lock_path: str, contender_id: str, timeout: int) -> Generator:
"""
This contextmanager takes a ZooKeeper lock, yields, then releases
the lock. This lock behaves like an interprocess mutex lock.
ZooKeeper allows one to read values without holding a lock, but
there is no guarantee that you will read the latest value. To read
the latest value, you must call `sync()` on a ZNode before calling
`get()`.
Args:
zk:
The client to use to communicate with ZooKeeper.
lock_path:
The ZNode path to use as prefix for the locking recipe.
contender_id:
The contender id to identify the current client
in the locking recipe.
timeout:
Time in seconds to wait for the lock to be acquired.
If this time elapses before the lock is acquired, a
`kazoo.exceptions.LockTimeout` exception is raised.
Raises:
kazoo.exceptions.LockTimeout:
If the `timeout` is exceeded without the lock being acquired.
"""
lock = zk.Lock(lock_path, contender_id)
try:
log.info("Acquiring ZooKeeper lock.")
lock.acquire(blocking=True, timeout=timeout)
except (ConnectionLoss, SessionExpiredError) as e:
msg_fmt = "Failed to acquire lock: {}"
msg = msg_fmt.format(e.__class__.__name__)
log.exception(msg)
raise e
except LockTimeout as e:
msg_fmt = "Failed to acquire lock in `{}` seconds"
msg = msg_fmt.format(timeout)
log.exception(msg)
raise e
else:
log.info("ZooKeeper lock acquired.")
yield
log.info("Releasing ZooKeeper lock")
lock.release()
log.info("ZooKeeper lock released.")
def _init_cockroachdb_cluster(ip: str) -> None:
"""
Starts CockroachDB listening on `ip`. It waits until the cluster ID is
published via the local gossip endpoint, signalling that the instance has
successfully initialized the cluster. Thereafter it shuts down the
bootstrap CockroachDB instance again.
Args:
ip:
The IP that CockroachDB should listen on.
This should be the internal IP of the current host.
"""
# We chose 1 second as a time to wait between retries.
# If we chose a value longer than this, we could wait for up to <chosen
# value> too long between retries, delaying the cluster start by up to that
# value.
#
# The more often we run this function, the more logs we generate.
# If we chose a value smaller than 1 second, we would therefore generate more logs.
# We consider 1 second to be a reasonable maximum delay and in our
# experience the log size has not been an issue.
wait_fixed_ms = 1000
# In our experience, the cluster is always initialized within one minute.
# The downside of having a timeout which is too short is that we may crash
# when the cluster was on track to becoming healthy.
#
# The downside of having a timeout which is too long is that we may wait up
# to that timeout to see an error.
#
# We chose 5 minutes as trade-off between these two, as we think that it
# is extremely unlikely that a successful initialization will take more
# than 5 minutes, and we think that waiting up to 5 minutes "too long"
# for an error (and accumulating 5 minutes of logs) is not so bad.
stop_max_delay_ms = 5 * 60 * 1000
@retrying.retry(
wait_fixed=wait_fixed_ms,
stop_max_delay=stop_max_delay_ms,
retry_on_result=lambda x: x is False,
)
def _wait_for_cluster_init() -> bool:
"""
CockroachDB Cluster initialization takes a certain amount of time
while the cluster ID and node ID are written to the storage.
If after 5 minutes of attempts the cluster ID is not available raise an
exception.
"""
gossip_url = 'http://localhost:8090/_status/gossip/local'
# 3.05 is a magic number for the HTTP ConnectTimeout.
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
# The rationale is to set connect timeouts to slightly larger than a multiple of 3,
# which is the default TCP packet retransmission window.
# 27 is the ReadTimeout, taken from the same example.
connect_timeout_seconds = 3.05
        # In our experience, reads have not taken longer than 1 second.
        # If the read timeout were extremely long, we might have to wait up to
        # that amount of time to see an error.
        # If it were too short, and CockroachDB took a long time to respond
        # because it is busy or on slow hardware, we might retry this function
        # even though the cluster is initialized.
        # Therefore we choose a generous timeout that should still not be
        # uncomfortably long for operators to wait.
read_timeout_seconds = 30
request_timeout = (connect_timeout_seconds, read_timeout_seconds)
try:
response = requests.get(gossip_url, timeout=request_timeout)
except (ConnectionError, Timeout) as exc:
message = (
'Retrying GET request to {url} as error {exc} was given.'
).format(url=gossip_url, exc=exc)
log.info(message)
return False
try:
response.raise_for_status()
except HTTPError as exc:
# 150 bytes was chosen arbitrarily as it might not be so long as to
# cause annoyance in a console, but it might be long enough to show
# some useful data.
first_150_bytes = response.content[:150]
decoded_first_150_bytes = first_150_bytes.decode(
encoding='ascii',
errors='backslashreplace',
)
message = (
                'Retrying GET request to {url} as status code {status_code} was given. '
'The first 150 bytes of the HTTP response, '
'decoded with the ASCII character encoding: '
'"{resp_text}".'
).format(
url=gossip_url,
status_code=response.status_code,
resp_text=decoded_first_150_bytes,
)
log.info(message)
return False
output = json.loads(response.text)
try:
cluster_id_bytes = output['infos']['cluster-id']['value']['rawBytes']
except KeyError:
return False
log.info((
'Cluster ID bytes {} present in local gossip endpoint.'
).format(cluster_id_bytes))
return True
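    # For reference, the local gossip response inspected above is assumed to
    # have (abbreviated) the shape:
    #
    #     {"infos": {"cluster-id": {"value": {"rawBytes": ...}}}}
    #
    # i.e. the cluster ID bytes live under infos -> cluster-id -> value -> rawBytes.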
# By default cockroachdb grows the cache to 25% of available
# memory. This makes no sense given our comparatively tiny amount
    # of data. The following limit of 100MiB was experimentally
    # determined to result in good enough performance for the IAM with
    # 60k groups, 15k users and 15 resources per group.
    #
    # This was done by populating a cockroachdb instance with the
    # aforementioned data and observing that memory usage stayed below
    # 100MiB while query performance remained fast (<100ms for
    # /permissions queries).
#
# When cockroachdb gains more tenant services and performance is
# found to drop, this value may be adjusted empirically.
cachesize = '100MiB'
cockroach_args = [
'/opt/mesosphere/active/cockroach/bin/cockroach',
'start',
'--logtostderr',
'--cache={}'.format(cachesize),
'--store=/var/lib/dcos/cockroach',
'--insecure',
'--advertise-host={}'.format(ip),
'--host={}'.format(ip),
'--port=26257',
'--http-host=127.0.0.1',
'--http-port=8090',
'--pid-file={}'.format(PID_FILE_PATH),
'--log-dir='
]
# Launch CockroachDB as the 'dcos_cockroach' user so file and directory ownership are set correctly.
dcos_cockroach_uid = pwd.getpwnam('dcos_cockroach').pw_uid
def run_as_dcos_cockroach() -> Any:
"""
This function is a hack to make `os.setuid()`'s type signature match what mypy is expecting
for preexec_fn.
"""
os.setuid(dcos_cockroach_uid)
return
log.info("Initializing CockroachDB cluster: {}".format(' '.join(cockroach_args)))
proc = subprocess.Popen(
cockroach_args,
preexec_fn=run_as_dcos_cockroach,
)
log.info("Waiting for CockroachDB to become ready.")
_wait_for_cluster_init()
log.info("CockroachDB cluster has been initialized.")
# Terminate CockroachDB instance to start it via systemd unit again later.
log.info("Terminating CockroachDB bootstrap instance.")
# Send SIGTERM to the cockroach process to trigger a graceful shutdown.
proc.terminate()
# We pass no timeout and rely on systemd to stop this process after
# `TimeoutStartSec` as specified in the unit file.
proc.wait()
log.info("Terminated CockroachDB bootstrap instance.")
def _get_registered_nodes(zk: KazooClient, zk_path: str) -> List[str]:
"""
Return the IPs of nodes that have registered in ZooKeeper.
The ZNode `zk_path` is expected to exist, having been
created during cluster bootstrap.
Args:
zk:
The client to use to communicate with ZooKeeper.
zk_path:
The path of the ZNode to use for node registration.
Returns:
A list of internal IP addresses of nodes that have
previously joined the CockroachDB cluster.
"""
# We call `sync()` before reading the value in order to
# read the latest data written to ZooKeeper.
# See https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#ch_zkGuarantees
log.info("Calling sync() on ZNode `{}`".format(zk_path))
zk.sync(zk_path)
log.info("Loading data from ZNode `{}`".format(zk_path))
data, _ = zk.get(zk_path)
if data:
log.info("Cluster was previously initialized.")
nodes = json.loads(data.decode('ascii'))['nodes'] # type: List[str]
log.info("Found registered nodes: {}".format(nodes))
return nodes
log.info("Found no registered nodes.")
return []
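# For reference, the ZNode payload parsed above (and written by
# _register_cluster_membership() below) is ASCII-encoded JSON of the form
#
#     b'{"nodes": ["10.0.0.1", "10.0.0.2"]}'
#
# where the IP addresses are purely illustrative.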
def _register_cluster_membership(zk: KazooClient, zk_path: str, ip: str) -> List[str]:
"""
Add `ip` to the list of cluster members registered in ZooKeeper.
The ZK lock must be held around the call to this function.
Args:
zk:
The client to use to communicate with ZooKeeper.
zk_path:
The path of the ZNode to use for node registration.
ip:
The ip to add to the list of cluster member IPs in ZooKeeper.
"""
log.info("Registering cluster membership for `{}`".format(ip))
# Get the latest list of cluster members.
nodes = _get_registered_nodes(zk=zk, zk_path=zk_path)
if ip in nodes:
# We're already registered with ZK.
log.info("Cluster member `{}` already registered in ZooKeeper. Skipping.".format(ip))
return nodes
log.info("Adding `{}` to list of nodes `{}`".format(ip, nodes))
nodes.append(ip)
zk.set(zk_path, json.dumps({"nodes": nodes}).encode("ascii"))
zk.sync(zk_path)
log.info("Successfully registered cluster membership for `{}`".format(ip))
return nodes
def _dump_nodes_to_file(nodes: List[str], file_path: str) -> None:
with open(file_path, 'w') as f:
log.info("Writing nodes {} to file {}".format(','.join(nodes), file_path))
f.write(','.join(nodes))
def main() -> None:
logging.basicConfig(format='[%(levelname)s] %(message)s', level='INFO')
# Determine our internal IP.
my_ip = utils.detect_ip()
log.info("My IP is `{}`".format(my_ip))
# Connect to ZooKeeper.
log.info("Connecting to ZooKeeper.")
zk_user = os.environ.get('DATASTORE_ZK_USER')
zk_secret = os.environ.get('DATASTORE_ZK_SECRET')
zk = zk_connect(zk_user=zk_user, zk_secret=zk_secret)
# We are connected to ZooKeeper.
# Ensure that the ZNodes exist.
zk.ensure_path("/cockroach")
zk.ensure_path("/cockroach/nodes")
zk.ensure_path("/cockroach/locking")
    # Determine whether the cluster has been bootstrapped already by
    # checking whether the `ZK_NODES_PATH` ZNode contains a list of
    # registered nodes. This is best-effort as we aren't holding the
    # lock, but we do call `zk.sync()` which is supposed to ensure that
    # we read the latest value from ZK.
nodes = _get_registered_nodes(zk=zk, zk_path=ZK_NODES_PATH)
if nodes:
        # The cluster has already been initialized. Register our own IP if
        # it is missing, dump the node IPs to `NODES_FILE_PATH` and exit.
log.info("Cluster has members registered already: {}".format(nodes))
if my_ip not in nodes:
log.info("IP not found in list of nodes. Registering cluster membership.")
with _zk_lock(zk=zk, lock_path=ZK_LOCK_PATH, contender_id=LOCK_CONTENDER_ID, timeout=ZK_LOCK_TIMEOUT):
nodes = _register_cluster_membership(zk=zk, zk_path=ZK_NODES_PATH, ip=my_ip)
_dump_nodes_to_file(nodes, NODES_FILE_PATH)
log.info("Registration complete. ")
return
# No cockroachdb nodes have been registered with ZK yet. We
# assume that we need to bootstrap the cluster so we take the ZK
# lock and hold it until the cluster is bootstrapped and our IP
# has been successfully registered with ZK.
#
# The lock needs to be held around the entire cockroachdb startup
# procedure as only the first instance should start without the
# --join parameter (and thereby bootstrap the cluster.) This lock
# prevents multiple instances from starting without --join at the
# same time.
#
# If we fail to acquire the lock it means a peer is already
# bootstrapping the cluster. We should crash and when we get
# restarted by systemd, we expect to see that the cluster has been
# bootstrapped and will enter that alternative code path which
# leads to an eventually converged cluster.
with _zk_lock(zk=zk, lock_path=ZK_LOCK_PATH, contender_id=LOCK_CONTENDER_ID, timeout=ZK_LOCK_TIMEOUT):
# We check that the cluster hasn't been bootstrapped since we
# first read the list of nodes from ZK.
log.info("Checking for registered nodes while holding lock.")
nodes = _get_registered_nodes(zk=zk, zk_path=ZK_NODES_PATH)
if nodes:
# The cluster has been bootstrapped since we checked. We join the
# existing cluster and dump the node IPs.
log.info("Cluster has already been initialized: {}".format(nodes))
nodes = _register_cluster_membership(zk=zk, zk_path=ZK_NODES_PATH, ip=my_ip)
_dump_nodes_to_file(nodes, NODES_FILE_PATH)
return
else:
log.info("Cluster has not been initialized yet.")
# The cluster still has not been bootstrapped. We start
# cockroachdb without a list of cluster IPs to join,
# which will cause it to bootstrap the cluster.
_init_cockroachdb_cluster(ip=my_ip)
# Only now that the CockroachDB cluster has been initialized, we
# add our IP to the list of nodes that have successfully joined the
# cluster at one stage or another.
#
# If this fails the fact that a cluster was initialized will be
# ignored by subsequent runs as our IP won't be present in ZK.
nodes = _register_cluster_membership(zk=zk, zk_path=ZK_NODES_PATH, ip=my_ip)
_dump_nodes_to_file(nodes, NODES_FILE_PATH)
log.info("Successfully initialized cluster.")
return
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
from __future__ import print_function
"""
test_split_subdivide_checkpointing.py
"""
tempdir = "testing_dir/"
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = __import__ (ruffus_name)
for attr in "pipeline_run", "pipeline_printout", "originate", "split", "transform", "subdivide", "formatter", "Pipeline":
globals()[attr] = getattr (ruffus, attr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import unittest
import shutil
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Each time the pipeline is FORCED to rerun,
# More files are created for each task
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@originate([tempdir + 'start'])
def make_start(outfile):
"""
-> start
"""
open(outfile, 'w').close()
@split(make_start, tempdir + '*.split')
def split_start(infiles, outfiles):
"""
-> XXX.split
where XXX = 0 .. N,
N = previous N + 1
"""
    # @split always runs exactly one job (unlike @subdivide),
    # so it implicitly combines all its inputs before running and generating multiple outputs.
    # @originate generates multiple outputs, so the input for @split is a list...
infile = infiles[0]
# clean up previous
for f in outfiles:
os.unlink(f)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# Create more files than the previous invocation
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
n_to_produce = len(outfiles) + 1
for i in range(n_to_produce):
f = '{}{}.split'.format(tempdir, i)
open(f, 'a').close()
@subdivide(split_start, formatter(), tempdir + '{basename[0]}_*.subdivided', tempdir + '{basename[0]}')
def subdivide_start(infile, outfiles, infile_basename):
# cleanup existing
for f in outfiles:
os.unlink(f)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# Create more files than the previous invocation
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
n_to_produce = len(outfiles) + 1
for i in range( n_to_produce):
open('{}_{}.subdivided'.format(infile_basename, i), 'a').close()
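# Illustrative run-to-run behaviour (matching the expected_files_after_*_runs
# lists in the tests below): the first run produces "start", "0.split" and
# "0_0.subdivided"; each forced rerun then adds one more ".split" file and one
# additional ".subdivided" file per existing ".split" file.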
class Test_ruffus(unittest.TestCase):
def tearDown(self):
        # only tear down if no exception was thrown, so that we can debug failures
try:
shutil.rmtree(tempdir)
except:
pass
def setUp(self):
try:
shutil.rmtree(tempdir)
except:
pass
os.makedirs(tempdir)
def check_file_exists_or_not_as_expected(self, expected_files, not_expected_files):
"""
Check if files exist / not exist
"""
for ee in expected_files:
if not os.path.exists(tempdir + ee):
raise Exception("Expected file %s" % (tempdir + ee))
for ne in not_expected_files:
if os.path.exists(tempdir + ne):
raise Exception("Unexpected file %s" % (tempdir + ne ))
def test_newstyle_ruffus (self):
test_pipeline = Pipeline("test")
test_pipeline.originate(task_func = make_start, output = [tempdir + 'start'])
test_pipeline.split(task_func = split_start, input = make_start, output = tempdir + '*.split')
test_pipeline.subdivide(task_func = subdivide_start, input = split_start, filter = formatter(), output = tempdir + '{basename[0]}_*.subdivided', extras = [tempdir + '{basename[0]}'])
expected_files_after_1_runs = ["start", "0.split", "0_0.subdivided"]
expected_files_after_2_runs = ["1.split", "0_1.subdivided", "1_0.subdivided"]
expected_files_after_3_runs = ["2.split", "0_2.subdivided", "1_1.subdivided", "2_0.subdivided"]
expected_files_after_4_runs = ["3.split", "0_3.subdivided", "1_2.subdivided", "2_1.subdivided", "3_0.subdivided"]
print(" Run pipeline normally...")
test_pipeline.run(multiprocess = 10, verbose=0)
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs,
expected_files_after_2_runs)
print(" Check that running again does nothing. (All up to date).")
test_pipeline.run(multiprocess = 10, verbose=0)
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs,
expected_files_after_2_runs)
print(" Running again with forced tasks to generate more files...")
test_pipeline.run(forcedtorun_tasks = ["test::make_start"], multiprocess = 10, verbose=0)
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs,
expected_files_after_3_runs)
print(" Check that running again does nothing. (All up to date).")
test_pipeline.run(multiprocess = 10, verbose=0)
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs,
expected_files_after_3_runs)
print(" Running again with forced tasks to generate even more files...")
test_pipeline.run(forcedtorun_tasks = make_start, multiprocess = 10, verbose=0)
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs
+ expected_files_after_3_runs,
expected_files_after_4_runs)
print(" Check that running again does nothing. (All up to date).")
test_pipeline.run(multiprocess = 10, verbose=0)
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs
+ expected_files_after_3_runs,
expected_files_after_4_runs)
def test_ruffus (self):
expected_files_after_1_runs = ["start", "0.split", "0_0.subdivided"]
expected_files_after_2_runs = ["1.split", "0_1.subdivided", "1_0.subdivided"]
expected_files_after_3_runs = ["2.split", "0_2.subdivided", "1_1.subdivided", "2_0.subdivided"]
expected_files_after_4_runs = ["3.split", "0_3.subdivided", "1_2.subdivided", "2_1.subdivided", "3_0.subdivided"]
print(" Run pipeline normally...")
pipeline_run(multiprocess = 10, verbose=0, pipeline= "main")
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs,
expected_files_after_2_runs)
print(" Check that running again does nothing. (All up to date).")
pipeline_run(multiprocess = 10, verbose=0, pipeline= "main")
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs,
expected_files_after_2_runs)
print(" Running again with forced tasks to generate more files...")
pipeline_run(forcedtorun_tasks = [make_start], multiprocess = 10, verbose=0, pipeline= "main")
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs,
expected_files_after_3_runs)
print(" Check that running again does nothing. (All up to date).")
pipeline_run(multiprocess = 10, verbose=0, pipeline= "main")
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs,
expected_files_after_3_runs)
print(" Running again with forced tasks to generate even more files...")
pipeline_run(forcedtorun_tasks = [make_start], multiprocess = 10, verbose=0, pipeline= "main")
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs
+ expected_files_after_3_runs,
expected_files_after_4_runs)
print(" Check that running again does nothing. (All up to date).")
pipeline_run(multiprocess = 10, verbose=0, pipeline= "main")
self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
+ expected_files_after_2_runs
+ expected_files_after_3_runs,
expected_files_after_4_runs)
if __name__ == '__main__':
unittest.main()
|
|
# This script is executed in the main console namespace so
# that all the variables defined here become console variables.
import ddapp
import os
import sys
import PythonQt
import json
from PythonQt import QtCore, QtGui
from time import time
import imp
import ddapp.applogic as app
from ddapp import drcargs
from ddapp import vtkAll as vtk
from ddapp import matlab
from ddapp import jointcontrol
from ddapp import callbacks
from ddapp import camerabookmarks
from ddapp import cameracontrol
from ddapp import bihandeddemo
from ddapp import debrisdemo
from ddapp import doordemo
from ddapp import drilldemo
from ddapp import tabledemo
from ddapp import mappingdemo
from ddapp import valvedemo
from ddapp import drivingplanner
from ddapp import egressplanner
from ddapp import polarisplatformplanner
from ddapp import surprisetask
from ddapp import continuouswalkingdemo
from ddapp import sitstandplanner
from ddapp import walkingtestdemo
from ddapp import terraintask
from ddapp import ik
from ddapp import ikplanner
from ddapp import objectmodel as om
from ddapp import spreadsheet
from ddapp import transformUtils
from ddapp import tdx
from ddapp import skybox
from ddapp import perception
from ddapp import segmentation
from ddapp import cameraview
from ddapp import colorize
from ddapp import drakevisualizer
from ddapp.fieldcontainer import FieldContainer
from ddapp import robotstate
from ddapp import roboturdf
from ddapp import robotsystem
from ddapp import affordancepanel
from ddapp import filterUtils
from ddapp import footstepsdriver
from ddapp import footstepsdriverpanel
from ddapp import framevisualization
from ddapp import lcmloggerwidget
from ddapp import lcmgl
from ddapp import atlasdriver
from ddapp import atlasdriverpanel
from ddapp import multisensepanel
from ddapp import navigationpanel
from ddapp import mappingpanel
from ddapp import handcontrolpanel
from ddapp import sensordatarequestpanel
from ddapp import tasklaunchpanel
from ddapp import pfgrasp
from ddapp import pfgrasppanel
from ddapp.jointpropagator import JointPropagator
from ddapp import coursemodel
from ddapp import copmonitor
from ddapp import robotplanlistener
from ddapp import handdriver
from ddapp import planplayback
from ddapp import playbackpanel
from ddapp import screengrabberpanel
from ddapp import splinewidget
from ddapp import teleoppanel
from ddapp import vtkNumpy as vnp
from ddapp import viewbehaviors
from ddapp import visualization as vis
from ddapp import actionhandlers
from ddapp.timercallback import TimerCallback
from ddapp.pointpicker import PointPicker, ImagePointPicker
from ddapp import segmentationpanel
from ddapp import lcmUtils
from ddapp.utime import getUtime
from ddapp.shallowCopy import shallowCopy
from ddapp import segmentationroutines
from ddapp import trackers
from ddapp import gamepad
from ddapp import blackoutmonitor
from ddapp.tasks import robottasks as rt
from ddapp.tasks import taskmanagerwidget
from ddapp.tasks.descriptions import loadTaskDescriptions
import drc as lcmdrc
from collections import OrderedDict
import functools
import math
import numpy as np
from ddapp.debugVis import DebugData
from ddapp import ioUtils as io
drcargs.requireStrict()
drcargs.args()
app.startup(globals())
om.init(app.getMainWindow().objectTree(), app.getMainWindow().propertiesPanel())
actionhandlers.init()
quit = app.quit
exit = quit
view = app.getDRCView()
camera = view.camera()
tree = app.getMainWindow().objectTree()
orbit = cameracontrol.OrbitController(view)
showPolyData = segmentation.showPolyData
updatePolyData = segmentation.updatePolyData
###############################################################################
robotSystem = robotsystem.create(view)
globals().update(dict(robotSystem))
useIk = True
useAtlasConvexHull = False
useRobotState = True
usePerception = True
useGrid = True
useSpreadsheet = True
useFootsteps = True
useHands = True
usePlanning = True
useAtlasDriver = True
useLCMGL = True
useLightColorScheme = True
useLoggingWidget = True
useDrakeVisualizer = True
useNavigationPanel = True
useFootContactVis = True
useFallDetectorVis = True
useImageWidget = False
useCameraFrustumVisualizer = True
useControllerRate = True
useForceDisplay = False
useSkybox = False
useDataFiles = True
usePFGrasp = False
useGamepad = True
useBlackoutText = True
useRandomWalk = True
useCOPMonitor = True
useCourseModel = False
poseCollection = PythonQt.dd.ddSignalMap()
costCollection = PythonQt.dd.ddSignalMap()
if useSpreadsheet:
spreadsheet.init(poseCollection, costCollection)
if useIk:
def onIkStartup(ikServer, startSuccess):
if startSuccess:
app.getMainWindow().statusBar().showMessage('Planning server started.', 2000)
else:
app.showErrorMessage('Error detected while starting the matlab planning server. '
'Please check the output console for more information.', title='Error starting matlab')
ikServer.outputConsole = app.getOutputConsole()
ikServer.infoFunc = app.displaySnoptInfo
ikServer.connectStartupCompleted(onIkStartup)
startIkServer()
if useAtlasDriver:
atlasdriver.systemStatus.outputConsole = app.getOutputConsole()
atlasdriverpanel.init(atlasDriver)
if usePerception:
segmentationpanel.init()
cameraview.init()
colorize.init()
cameraview.cameraView.rayCallback = segmentation.extractPointsAlongClickRay
multisensepanel.init(perception.multisenseDriver, neckDriver)
sensordatarequestpanel.init()
# for kintinuous, use 'CAMERA_FUSED', 'CAMERA_TSDF'
disparityPointCloud = segmentation.DisparityPointCloudItem('stereo point cloud', 'CAMERA', 'CAMERA_LEFT', cameraview.imageManager)
disparityPointCloud.addToView(view)
om.addToObjectModel(disparityPointCloud, parentObj=om.findObjectByName('sensors'))
def createPointerTracker():
return trackers.PointerTracker(robotStateModel, disparityPointCloud)
if useGrid:
grid = vis.showGrid(view, color=[0,0,0], alpha=0.1)
grid.setProperty('Surface Mode', 'Surface with edges')
app.setBackgroundColor([0.3, 0.3, 0.35], [0.95,0.95,1])
viewOptions = vis.ViewOptionsItem(view)
om.addToObjectModel(viewOptions, parentObj=om.findObjectByName('sensors'))
class ViewBackgroundLightHandler(object):
def __init__(self, viewOptions, grid):
self.viewOptions = viewOptions
self.action = app.getToolsMenuActions()['ActionToggleBackgroundLight']
self.action.connect('triggered()', self.toggle)
self.properties = { viewOptions : {'Gradient background':True, 'Background color':[0.0, 0.0, 0.0], 'Background color 2':[0.3, 0.3, 0.3]},
grid : {'Surface Mode':'Wireframe', 'Alpha':0.05, 'Color':[1.0, 1.0, 1.0], 'Color By':0}
}
self.cachedProperties = {}
self.storeProperties()
def storeProperties(self):
def grab(obj, props):
for key in props.keys():
self.cachedProperties.setdefault(obj, dict())[key] = obj.getProperty(key)
for obj, props in self.properties.iteritems():
grab(obj, props)
def applyProperties(self, properties):
def send(obj, props):
for key, value in props.iteritems():
obj.setProperty(key, value)
for obj, props in properties.iteritems():
send(obj, props)
def toggle(self):
if self.action.checked:
self.storeProperties()
self.applyProperties(self.properties)
else:
self.applyProperties(self.cachedProperties)
viewBackgroundLightHandler = ViewBackgroundLightHandler(viewOptions, grid)
if not useLightColorScheme:
viewBackgroundLightHandler.action.trigger()
if useHands:
handcontrolpanel.init(lHandDriver, rHandDriver, robotStateModel, robotStateJointController, view)
if useFootsteps:
footstepsPanel = footstepsdriverpanel.init(footstepsDriver, robotStateModel, robotStateJointController, irisDriver)
if useLCMGL:
lcmglManager = lcmgl.init(view)
app.MenuActionToggleHelper('Tools', 'Renderer - LCM GL', lcmglManager.isEnabled, lcmglManager.setEnabled)
if useDrakeVisualizer:
drakeVisualizer = drakevisualizer.DrakeVisualizer(view)
app.MenuActionToggleHelper('Tools', 'Renderer - Drake', drakeVisualizer.isEnabled, drakeVisualizer.setEnabled)
if usePlanning:
def showPose(pose):
playbackRobotModel.setProperty('Visible', True)
playbackJointController.setPose('show_pose', pose)
def playPlan(plan):
playPlans([plan])
def playPlans(plans):
planPlayback.stopAnimation()
playbackRobotModel.setProperty('Visible', True)
planPlayback.playPlans(plans, playbackJointController)
def playManipPlan():
playPlan(manipPlanner.lastManipPlan)
def playWalkingPlan():
playPlan(footstepsDriver.lastWalkingPlan)
def plotManipPlan():
planPlayback.plotPlan(manipPlanner.lastManipPlan)
def planStand():
ikPlanner.computeStandPlan(robotStateJointController.q)
def planNominal():
ikPlanner.computeNominalPlan(robotStateJointController.q)
def fitDrillMultisense():
pd = om.findObjectByName('Multisense').model.revPolyData
om.removeFromObjectModel(om.findObjectByName('debug'))
segmentation.findAndFitDrillBarrel(pd)
def refitBlocks(autoApprove=True):
polyData = om.findObjectByName('Multisense').model.revPolyData
segmentation.updateBlockAffordances(polyData)
if autoApprove:
approveRefit()
def approveRefit():
for obj in om.getObjects():
if isinstance(obj, segmentation.BlockAffordanceItem):
if 'refit' in obj.getProperty('Name'):
originalObj = om.findObjectByName(obj.getProperty('Name').replace(' refit', ''))
if originalObj:
originalObj.params = obj.params
originalObj.polyData.DeepCopy(obj.polyData)
originalObj.actor.GetUserTransform().SetMatrix(obj.actor.GetUserTransform().GetMatrix())
originalObj.actor.GetUserTransform().Modified()
obj.setProperty('Visible', False)
def sendDataRequest(requestType, repeatTime=0.0):
msg = lcmdrc.data_request_t()
msg.type = requestType
msg.period = int(repeatTime*10) # period is specified in tenths of a second
msgList = lcmdrc.data_request_list_t()
msgList.utime = getUtime()
msgList.requests = [msg]
msgList.num_requests = len(msgList.requests)
lcmUtils.publish('DATA_REQUEST', msgList)
def sendSceneHeightRequest(repeatTime=0.0):
sendDataRequest(lcmdrc.data_request_t.HEIGHT_MAP_SCENE, repeatTime)
def sendWorkspaceDepthRequest(repeatTime=0.0):
sendDataRequest(lcmdrc.data_request_t.DEPTH_MAP_WORKSPACE_C, repeatTime)
def sendSceneDepthRequest(repeatTime=0.0):
sendDataRequest(lcmdrc.data_request_t.DEPTH_MAP_SCENE, repeatTime)
def sendFusedDepthRequest(repeatTime=0.0):
sendDataRequest(lcmdrc.data_request_t.FUSED_DEPTH, repeatTime)
def sendFusedHeightRequest(repeatTime=0.0):
sendDataRequest(lcmdrc.data_request_t.FUSED_HEIGHT, repeatTime)
teleopJointPropagator = JointPropagator(robotStateModel, teleopRobotModel, roboturdf.getRobotiqJoints() + ['neck_ay'])
playbackJointPropagator = JointPropagator(robotStateModel, playbackRobotModel, roboturdf.getRobotiqJoints())
def doPropagation(model=None):
if teleopRobotModel.getProperty('Visible'):
teleopJointPropagator.doPropagation()
if playbackRobotModel.getProperty('Visible'):
playbackJointPropagator.doPropagation()
robotStateModel.connectModelChanged(doPropagation)
#app.addToolbarMacro('scene height', sendSceneHeightRequest)
#app.addToolbarMacro('scene depth', sendSceneDepthRequest)
#app.addToolbarMacro('stereo height', sendFusedHeightRequest)
#app.addToolbarMacro('stereo depth', sendFusedDepthRequest)
jointLimitChecker = teleoppanel.JointLimitChecker(robotStateModel, robotStateJointController)
jointLimitChecker.setupMenuAction()
jointLimitChecker.start()
spindleSpinChecker = multisensepanel.SpindleSpinChecker(spindleMonitor)
spindleSpinChecker.setupMenuAction()
postureShortcuts = teleoppanel.PosturePlanShortcuts(robotStateJointController, ikPlanner)
def drillTrackerOn():
om.findObjectByName('Multisense').model.showRevolutionCallback = fitDrillMultisense
def drillTrackerOff():
om.findObjectByName('Multisense').model.showRevolutionCallback = None
def fitPosts():
segmentation.fitVerticalPosts(segmentation.getCurrentRevolutionData())
affordancePanel.onGetRaycastTerrain()
ikPlanner.addPostureGoalListener(robotStateJointController)
if 'fixedBaseArm' in drcargs.getDirectorConfig()['userConfig']:
ikPlanner.fixedBaseArm = True
playbackPanel = playbackpanel.init(planPlayback, playbackRobotModel, playbackJointController,
robotStateModel, robotStateJointController, manipPlanner)
footstepsDriver.walkingPlanCallback = playbackPanel.setPlan
manipPlanner.connectPlanReceived(playbackPanel.setPlan)
teleopPanel = teleoppanel.init(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController,
ikPlanner, manipPlanner, affordanceManager, playbackPanel.setPlan, playbackPanel.hidePlan)
if useGamepad:
gamePad = gamepad.Gamepad(teleopPanel, teleopJointController, ikPlanner, view)
if useBlackoutText:
blackoutMonitor = blackoutmonitor.BlackoutMonitor(robotStateJointController, view, cameraview, mapServerSource)
debrisDemo = debrisdemo.DebrisPlannerDemo(robotStateModel, robotStateJointController, playbackRobotModel,
ikPlanner, manipPlanner, atlasdriver.driver, lHandDriver,
perception.multisenseDriver, refitBlocks)
tableDemo = tabledemo.TableDemo(robotStateModel, playbackRobotModel,
ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver,
perception.multisenseDriver, view, robotStateJointController, playPlans, teleopPanel)
tableTaskPanel = tabledemo.TableTaskPanel(tableDemo)
drillDemo = drilldemo.DrillPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
fitDrillMultisense, robotStateJointController,
playPlans, teleopPanel.showPose, cameraview, segmentationpanel)
drillTaskPanel = drilldemo.DrillTaskPanel(drillDemo)
valveDemo = valvedemo.ValvePlannerDemo(robotStateModel, footstepsDriver, footstepsPanel, manipPlanner, ikPlanner,
lHandDriver, rHandDriver, robotStateJointController)
valveTaskPanel = valvedemo.ValveTaskPanel(valveDemo)
drivingPlannerPanel = drivingplanner.DrivingPlannerPanel(robotSystem)
walkingDemo = walkingtestdemo.walkingTestDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
robotStateJointController,
playPlans, showPose)
bihandedDemo = bihandeddemo.BihandedPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
fitDrillMultisense, robotStateJointController,
playPlans, showPose, cameraview, segmentationpanel)
mappingDemo = mappingdemo.MappingDemo(robotStateModel, playbackRobotModel,
ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver,
perception.multisenseDriver, view, robotStateJointController, playPlans)
doorDemo = doordemo.DoorDemo(robotStateModel, footstepsDriver, manipPlanner, ikPlanner,
lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
fitDrillMultisense, robotStateJointController,
playPlans, showPose)
doorTaskPanel = doordemo.DoorTaskPanel(doorDemo)
terrainTaskPanel = terraintask.TerrainTaskPanel(robotSystem)
terrainTask = terrainTaskPanel.terrainTask
surpriseTaskPanel = surprisetask.SurpriseTaskPanel(robotSystem)
surpriseTask = surpriseTaskPanel.planner
egressPanel = egressplanner.EgressPanel(robotSystem)
egressPlanner = egressPanel.egressPlanner
taskPanels = OrderedDict()
taskPanels['Driving'] = drivingPlannerPanel.widget
taskPanels['Egress'] = egressPanel.widget
taskPanels['Door'] = doorTaskPanel.widget
taskPanels['Valve'] = valveTaskPanel.widget
taskPanels['Drill'] = drillTaskPanel.widget
taskPanels['Surprise'] = surpriseTaskPanel.widget
taskPanels['Terrain'] = terrainTaskPanel.widget
taskPanels['Table'] = tableTaskPanel.widget
tasklaunchpanel.init(taskPanels)
splinewidget.init(view, handFactory, robotStateModel)
rt.robotSystem = robotSystem
taskManagerPanel = taskmanagerwidget.init()
for taskDescription in loadTaskDescriptions():
taskManagerPanel.taskQueueWidget.loadTaskDescription(taskDescription[0], taskDescription[1])
taskManagerPanel.taskQueueWidget.setCurrentQueue('Task library')
for obj in om.getObjects():
obj.setProperty('Deletable', False)
if useCOPMonitor:
    copMonitor = copmonitor.COPMonitor(robotSystem, view)
if useNavigationPanel:
navigationPanel = navigationpanel.init(robotStateJointController, footstepsDriver)
picker = PointPicker(view, callback=navigationPanel.pointPickerStoredFootsteps, numberOfPoints=2)
#picker.start()
continuouswalkingDemo = continuouswalkingdemo.ContinousWalkingDemo(robotStateModel, footstepsPanel, robotStateJointController, ikPlanner,
teleopJointController, navigationPanel, cameraview)
if useLoggingWidget:
w = lcmloggerwidget.LCMLoggerWidget(statusBar=app.getMainWindow().statusBar())
app.getMainWindow().statusBar().addPermanentWidget(w.button)
useMappingPanel = True
if useMappingPanel:
mappingPanel = mappingpanel.init(robotStateJointController, footstepsDriver)
if useControllerRate:
class ControllerRateLabel(object):
'''
Displays a controller frequency in the status bar
'''
def __init__(self, atlasDriver, statusBar):
self.atlasDriver = atlasDriver
self.label = QtGui.QLabel('')
statusBar.addPermanentWidget(self.label)
self.timer = TimerCallback(targetFps=1)
self.timer.callback = self.showRate
self.timer.start()
def showRate(self):
rate = self.atlasDriver.getControllerRate()
rate = 'unknown' if rate is None else '%d hz' % rate
self.label.text = 'Controller rate: %s' % rate
controllerRateLabel = ControllerRateLabel(atlasDriver, app.getMainWindow().statusBar())
if useForceDisplay:
class LCMForceDisplay(object):
'''
Displays foot force sensor signals in a status bar widget or label widget
'''
def onAtlasState(self,msg):
self.l_foot_force_z = msg.force_torque.l_foot_force_z
self.r_foot_force_z = msg.force_torque.r_foot_force_z
def __init__(self, channel, statusBar=None):
self.sub = lcmUtils.addSubscriber(channel, lcmdrc.atlas_state_t, self.onAtlasState)
self.label = QtGui.QLabel('')
statusBar.addPermanentWidget(self.label)
self.timer = TimerCallback(targetFps=10)
self.timer.callback = self.showRate
self.timer.start()
self.l_foot_force_z = 0
self.r_foot_force_z = 0
def __del__(self):
lcmUtils.removeSubscriber(self.sub)
def showRate(self):
global leftInContact, rightInContact
self.label.text = '%.2f | %.2f' % (self.l_foot_force_z,self.r_foot_force_z)
rateComputer = LCMForceDisplay('ATLAS_STATE', app.getMainWindow().statusBar())
if useSkybox:
skyboxDataDir = os.path.expanduser('~/Downloads/skybox')
imageMap = skybox.getSkyboxImages(skyboxDataDir)
skyboxObjs = skybox.createSkybox(imageMap, view)
skybox.connectSkyboxCamera(view)
#skybox.createTextureGround(os.path.join(skyboxDataDir, 'Dirt_seamless.jpg'), view)
#view.camera().SetViewAngle(60)
class RobotLinkHighlighter(object):
def __init__(self, robotModel):
self.robotModel = robotModel
self.previousColors = {}
def highlightLink(self, linkName, color):
currentColor = self.robotModel.model.getLinkColor(linkName)
if not currentColor.isValid():
return
if linkName not in self.previousColors:
self.previousColors[linkName] = currentColor
alpha = self.robotModel.getProperty('Alpha')
newColor = QtGui.QColor(color[0]*255, color[1]*255, color[2]*255, alpha*255)
self.robotModel.model.setLinkColor(linkName, newColor)
def dehighlightLink(self, linkName):
color = self.previousColors.pop(linkName, None)
if color is None:
return
color.setAlpha(self.robotModel.getProperty('Alpha')*255)
self.robotModel.model.setLinkColor(linkName, color)
robotHighlighter = RobotLinkHighlighter(robotStateModel)
if useFootContactVis:
def onFootContact(msg):
leftInContact = msg.left_contact > 0.0
rightInContact = msg.right_contact > 0.0
for linkName, inContact in [['l_foot', msg.left_contact > 0.0], ['r_foot', msg.right_contact > 0.0]]:
if inContact:
robotHighlighter.highlightLink(linkName, [0, 0, 1])
else:
robotHighlighter.dehighlightLink(linkName)
#robotStateModel.model.setLinkColor(drcargs.getDirectorConfig()['leftFootLink'], contactColor if leftInContact else noContactColor)
#robotStateModel.model.setLinkColor(drcargs.getDirectorConfig()['rightFootLink'], contactColor if rightInContact else noContactColor)
footContactSub = lcmUtils.addSubscriber('FOOT_CONTACT_ESTIMATE', lcmdrc.foot_contact_estimate_t, onFootContact)
footContactSub.setSpeedLimit(60)
if useFallDetectorVis:
def onPlanStatus(msg):
links = ['pelvis', 'utorso']
if msg.plan_type == lcmdrc.plan_status_t.RECOVERING:
for link in links:
robotHighlighter.highlightLink(link, [1,0.4,0.0])
elif msg.plan_type == lcmdrc.plan_status_t.BRACING:
for link in links:
robotHighlighter.highlightLink(link, [1, 0, 0])
else:
for link in links:
robotHighlighter.dehighlightLink(link)
fallDetectorSub = lcmUtils.addSubscriber("PLAN_EXECUTION_STATUS", lcmdrc.plan_status_t, onPlanStatus)
fallDetectorSub.setSpeedLimit(10)
if useDataFiles:
for filename in drcargs.args().data_files:
polyData = io.readPolyData(filename)
if polyData:
vis.showPolyData(polyData, os.path.basename(filename))
if useImageWidget:
imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'CAMERA_LEFT', view)
#imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'KINECT_RGB', view)
if useCameraFrustumVisualizer:
cameraFrustumVisualizer = cameraview.CameraFrustumVisualizer(robotStateModel, cameraview.imageManager, 'CAMERA_LEFT')
class ImageOverlayManager(object):
def __init__(self):
self.viewName = 'CAMERA_LEFT'
#self.viewName = 'KINECT_RGB'
self.size = 400
self.position = [0, 0]
self.usePicker = False
self.imageView = None
self.imagePicker = None
self._prevParent = None
def show(self):
if self.imageView:
return
imageView = cameraview.views[self.viewName]
self.imageView = imageView
self._prevParent = imageView.view.parent()
imageView.view.hide()
imageView.view.setParent(view)
imageView.view.resize(self.size, self.size)
imageView.view.move(*self.position)
imageView.view.show()
if self.usePicker:
self.imagePicker = ImagePointPicker(imageView)
self.imagePicker.start()
def hide(self):
if self.imageView:
self.imageView.view.hide()
self.imageView.view.setParent(self._prevParent)
self.imageView.view.show()
self.imageView = None
if self.imagePicker:
self.imagePicker.stop()
class ToggleImageViewHandler(object):
def __init__(self, manager):
self.action = app.getToolsMenuActions()['ActionToggleImageView']
self.action.connect('triggered()', self.toggle)
self.manager = manager
def toggle(self):
if self.action.checked:
self.manager.show()
else:
self.manager.hide()
imageOverlayManager = ImageOverlayManager()
imageViewHandler = ToggleImageViewHandler(imageOverlayManager)
showImageOverlay = imageOverlayManager.show
hideImageOverlay = imageOverlayManager.hide
screengrabberpanel.init(view)
framevisualization.init(view)
affordancePanel = affordancepanel.init(view, affordanceManager, ikServer, robotStateJointController, raycastDriver)
camerabookmarks.init(view)
def getLinkFrame(linkName, model=None):
model = model or robotStateModel
return model.getLinkFrame(linkName)
def showLinkFrame(linkName, model=None):
frame = getLinkFrame(linkName, model)
if not frame:
raise Exception('Link not found: ' + linkName)
return vis.updateFrame(frame, linkName, parent='link frames')
def sendEstRobotState(pose=None):
if pose is None:
pose = robotStateJointController.q
msg = robotstate.drakePoseToRobotState(pose)
lcmUtils.publish('EST_ROBOT_STATE', msg)
def enableArmEncoders():
msg = lcmdrc.utime_t()
msg.utime = 1
lcmUtils.publish('ENABLE_ENCODERS', msg)
def disableArmEncoders():
msg = lcmdrc.utime_t()
msg.utime = -1
lcmUtils.publish('ENABLE_ENCODERS', msg)
def sendDesiredPumpPsi(desiredPsi):
atlasDriver.sendDesiredPumpPsi(desiredPsi)
app.setCameraTerrainModeEnabled(view, True)
app.resetCamera(viewDirection=[-1,0,0], view=view)
viewBehaviors = viewbehaviors.ViewBehaviors(view)
# Drill Demo Functions for in-image rendering:
useDrillDemo = False
if useDrillDemo:
def spawnHandAtCurrentLocation(side='left'):
        if side == 'left':
tf = transformUtils.copyFrame( getLinkFrame( 'l_hand_face') )
handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'left')
else:
tf = transformUtils.copyFrame( getLinkFrame( 'right_pointer_tip') )
handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'right')
def drawFrameInCamera(t, frameName='new frame',visible=True):
v = imageView.view
q = cameraview.imageManager.queue
localToCameraT = vtk.vtkTransform()
q.getTransform('local', 'CAMERA_LEFT', localToCameraT)
res = vis.showFrame( vtk.vtkTransform() , 'temp',view=v, visible=True, scale = 0.2)
om.removeFromObjectModel(res)
pd = res.polyData
pd = filterUtils.transformPolyData(pd, t)
pd = filterUtils.transformPolyData(pd, localToCameraT)
q.projectPoints('CAMERA_LEFT', pd )
vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes',parent='camera overlay',visible=visible)
def drawObjectInCamera(objectName,visible=True):
v = imageView.view
q = cameraview.imageManager.queue
localToCameraT = vtk.vtkTransform()
q.getTransform('local', 'CAMERA_LEFT', localToCameraT)
obj = om.findObjectByName(objectName)
if obj is None:
return
objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
objPolyDataOriginal = obj.polyData
pd = objPolyDataOriginal
pd = filterUtils.transformPolyData(pd, objToLocalT)
pd = filterUtils.transformPolyData(pd, localToCameraT)
q.projectPoints('CAMERA_LEFT', pd)
vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0,1,0],parent='camera overlay',visible=visible)
def projectDrillDemoInCamera():
q = om.findObjectByName('camera overlay')
om.removeFromObjectModel(q)
imageView = cameraview.views['CAMERA_LEFT']
imageView.imageActor.SetOpacity(.2)
drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame',visible=False)
tf = transformUtils.copyFrame( drillDemo.drill.frame.transform )
tf.PreMultiply()
tf.Concatenate( drillDemo.drill.drillToButtonTransform )
drawFrameInCamera(tf, 'drill button')
tf2 = transformUtils.copyFrame( tf )
tf2.PreMultiply()
tf2.Concatenate( transformUtils.frameFromPositionAndRPY( [0,0,0] , [180,0,0] ) )
drawFrameInCamera(tf2, 'drill button flip')
drawObjectInCamera('drill',visible=False)
drawObjectInCamera('sensed pointer tip')
obj = om.findObjectByName('sensed pointer tip frame')
if (obj is not None):
drawFrameInCamera(obj.transform, 'sensed pointer tip frame',visible=False)
#drawObjectInCamera('left robotiq',visible=False)
#drawObjectInCamera('right pointer',visible=False)
v = imageView.view
v.render()
showImageOverlay()
drillDemo.pointerTracker = createPointerTracker()
drillDemo.projectCallback = projectDrillDemoInCamera
drillYawPreTransform = vtk.vtkTransform()
drillYawPreTransform.PostMultiply()
def onDrillYawSliderChanged(value):
yawOffset = value - 180.0
drillDemo.drillYawSliderValue = yawOffset
drillDemo.updateDrillToHand()
app.getMainWindow().macrosToolBar().addWidget(QtGui.QLabel('drill yaw:'))
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setMaximum(360)
slider.setValue(180)
slider.setMaximumWidth(200)
slider.connect('valueChanged(int)', onDrillYawSliderChanged)
app.getMainWindow().macrosToolBar().addWidget(slider)
def sendPointerPrep():
drillDemo.planPointerPressGaze(-0.05)
def sendPointerPress():
drillDemo.planPointerPressGaze(0.01)
def sendPointerPressDeep():
drillDemo.planPointerPressGaze(0.015)
app.addToolbarMacro('drill posture', drillDemo.planBothRaisePowerOn)
app.addToolbarMacro('pointer prep', sendPointerPrep)
app.addToolbarMacro('pointer press', sendPointerPress)
app.addToolbarMacro('pointer press deep', sendPointerPressDeep)
if usePFGrasp:
pfgrasper = pfgrasp.PFGrasp(drillDemo, robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner,
lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver,
fitDrillMultisense, robotStateJointController,
playPlans, showPose, cameraview, segmentationpanel)
showImageOverlay()
hideImageOverlay()
pfgrasppanel.init(pfgrasper, _prevParent, imageView, imagePicker, cameraview)
import signal
def sendMatlabSigint():
ikServer.comm.client.proc.send_signal(signal.SIGINT)
#app.addToolbarMacro('Ctrl+C MATLAB', sendMatlabSigint)
class AffordanceTextureUpdater(object):
def __init__(self, affordanceManager):
self.affordanceManager = affordanceManager
self.timer = TimerCallback(targetFps=10)
self.timer.callback = self.updateTextures
self.timer.start()
def updateTexture(self, obj):
if obj.getProperty('Camera Texture Enabled'):
cameraview.applyCameraTexture(obj, cameraview.imageManager)
else:
cameraview.disableCameraTexture(obj)
obj._renderAllViews()
def updateTextures(self):
for aff in affordanceManager.getAffordances():
self.updateTexture(aff)
affordanceTextureUpdater = AffordanceTextureUpdater(affordanceManager)
def drawCenterOfMass(model):
stanceFrame = footstepsDriver.getFeetMidPoint(model)
com = list(model.model.getCenterOfMass())
com[2] = stanceFrame.GetPosition()[2]
d = DebugData()
d.addSphere(com, radius=0.015)
obj = vis.updatePolyData(d.getPolyData(), 'COM %s' % model.getProperty('Name'), color=[1,0,0], visible=False, parent=model)
def initCenterOfMassVisualization():
for model in [robotStateModel, teleopRobotModel, playbackRobotModel]:
model.connectModelChanged(drawCenterOfMass)
drawCenterOfMass(model)
initCenterOfMassVisualization()
class RobotMoverWidget(object):
def __init__(self, jointController):
self.jointController = jointController
pos, rpy = jointController.q[:3], jointController.q[3:6]
t = transformUtils.frameFromPositionAndRPY(pos, np.degrees(rpy))
self.frame = vis.showFrame(t, 'mover widget', scale=0.3)
self.frame.setProperty('Edit', True)
self.frame.connectFrameModified(self.onFrameModified)
def onFrameModified(self, frame):
pos, rpy = self.frame.transform.GetPosition(), transformUtils.rollPitchYawFromTransform(self.frame.transform)
q = self.jointController.q.copy()
q[:3] = pos
q[3:6] = rpy
self.jointController.setPose('moved_pose', q)
class RobotGridUpdater(object):
def __init__(self, gridFrame, robotModel, jointController):
self.gridFrame = gridFrame
self.robotModel = robotModel
self.jointController = jointController
self.robotModel.connectModelChanged(self.updateGrid)
def updateGrid(self, model):
pos = self.jointController.q[:3]
x = int(np.round(pos[0])) / 10
y = int(np.round(pos[1])) / 10
z = int(np.round(pos[2] - 0.85)) / 1
t = vtk.vtkTransform()
t.Translate((x*10,y*10,z))
self.gridFrame.copyFrame(t)
gridUpdater = RobotGridUpdater(grid.getChildFrame(), robotStateModel, robotStateJointController)
class IgnoreOldStateMessagesSelector(object):
def __init__(self, jointController):
self.jointController = jointController
self.action = app.addMenuAction('Tools', 'Ignore Old State Messages')
self.action.setCheckable(True)
self.action.setChecked(self.jointController.ignoreOldStateMessages)
self.action.connect('triggered()', self.toggle)
def toggle(self):
self.jointController.ignoreOldStateMessages = bool(self.action.checked)
IgnoreOldStateMessagesSelector(robotStateJointController)
class RandomWalk(object):
def __init__(self, max_distance_per_plan=2):
self.subs = []
self.max_distance_per_plan=max_distance_per_plan
def handleStatus(self, msg):
if msg.plan_type == msg.STANDING:
goal = transformUtils.frameFromPositionAndRPY(
np.array([robotStateJointController.q[0] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
robotStateJointController.q[1] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
robotStateJointController.q[2] - 0.84]),
[0, 0, robotStateJointController.q[5] + 2 * np.degrees(np.pi) * (np.random.random() - 0.5)])
request = footstepsDriver.constructFootstepPlanRequest(robotStateJointController.q, goal)
request.params.max_num_steps = 18
footstepsDriver.sendFootstepPlanRequest(request)
def handleFootstepPlan(self, msg):
footstepsDriver.commitFootstepPlan(msg)
def start(self):
sub = lcmUtils.addSubscriber('PLAN_EXECUTION_STATUS', lcmdrc.plan_status_t, self.handleStatus)
sub.setSpeedLimit(0.2)
self.subs.append(sub)
self.subs.append(lcmUtils.addSubscriber('FOOTSTEP_PLAN_RESPONSE', lcmdrc.footstep_plan_t, self.handleFootstepPlan))
def stop(self):
for sub in self.subs:
lcmUtils.removeSubscriber(sub)
if useRandomWalk:
randomWalk = RandomWalk()
if useCourseModel:
courseModel = coursemodel.CourseModel()
if 'useKuka' in drcargs.getDirectorConfig()['userConfig']:
import kinectlcm
#kinectlcm.init()
imageOverlayManager.viewName = "KINECT_RGB"
#ikPlanner.fixedBaseArm = True
#showImageOverlay()
if 'exo' in drcargs.args():
if (drcargs.args().exo):
ikPlanner.pushToMatlab = False
def roomMap():
mappingPanel.onStartMappingButton()
t = mappingdemo.MappingDemo(robotStateModel, playbackRobotModel,
ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver,
perception.multisenseDriver, view, robotStateJointController, playPlans)
t.visOnly = False
t.optionalUserPromptEnabled = False
q = t.autonomousExecuteRoomMap()
q.connectTaskEnded(mappingSweepEnded)
q.start()
def mappingSweepEnded(taskQ, task):
if task.func_name == 'doneIndicator':
import time as qq
mappingPanel.onStopMappingButton()
qq.sleep(3)
mappingPanel.onShowMapButton()
print "DONE WITH MAPPING ROOM"
|
|
import datetime
import json
import httpretty
import pytest
from django.utils import timezone
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from addons.github.tests.factories import GitHubAccountFactory
from osf.models import AbstractNode as Node
from website.util import waterbutler_api_url_for
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory
)
def prepare_mock_wb_response(
node=None,
provider='github',
files=None,
folder=True,
path='/',
method=httpretty.GET,
status_code=200
):
"""Prepare a mock Waterbutler response with httpretty.
:param Node node: Target node.
:param str provider: Addon provider
:param list files: Optional list of files. You can specify partial data; missing values
will have defaults.
:param folder: True if mocking out a folder response, False if a file response.
:param path: Waterbutler path, passed to waterbutler_api_url_for.
:param str method: HTTP method.
:param int status_code: HTTP status.
"""
files = files or []
wb_url = waterbutler_api_url_for(node._id, provider=provider, path=path, meta=True)
default_file = {
u'contentType': None,
u'extra': {u'downloads': 0, u'version': 1},
u'kind': u'file',
u'modified': None,
u'name': u'NewFile',
u'path': u'/NewFile',
u'provider': provider,
u'size': None,
u'materialized': '/',
}
if len(files):
data = [dict(default_file, **each) for each in files]
else:
data = [default_file]
jsonapi_data = []
for datum in data:
jsonapi_data.append({'attributes': datum})
if not folder:
jsonapi_data = jsonapi_data[0]
body = json.dumps({
u'data': jsonapi_data
})
httpretty.register_uri(
method,
wb_url,
body=body,
status=status_code,
content_type='application/json'
)
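# Example call (mirrors the tests below); `project` stands for any Node
# instance whose addon response should be mocked:
#
#     prepare_mock_wb_response(
#         node=project,
#         provider='github',
#         files=[{'name': 'NewFile'}],
#     )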
class TestNodeFilesList(ApiTestCase):
def setUp(self):
super(TestNodeFilesList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.private_url = '/{}nodes/{}/files/'.format(API_BASE, self.project._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.user, is_public=True)
self.public_url = '/{}nodes/{}/files/'.format(API_BASE, self.public_project._id)
httpretty.enable()
def tearDown(self):
super(TestNodeFilesList, self).tearDown()
httpretty.disable()
httpretty.reset()
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {oauth_settings._id: []}
addon.user_settings.save()
def _prepare_mock_wb_response(self, node=None, **kwargs):
prepare_mock_wb_response(node=node or self.project, **kwargs)
def test_returns_public_files_logged_out(self):
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['attributes']['provider'], 'osfstorage')
assert_equal(res.content_type, 'application/vnd.api+json')
def test_returns_public_files_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data'][0]['attributes']['provider'], 'osfstorage')
def test_returns_storage_addons_link(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_in('storage_addons', res.json['data'][0]['links'])
def test_returns_file_data(self):
fobj = self.project.get_addon('osfstorage').get_root().append_file('NewFile')
fobj.save()
res = self.app.get('{}osfstorage/{}'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_true(isinstance(res.json['data'], dict))
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['kind'], 'file')
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
def test_returns_osfstorage_folder_version_two(self):
fobj = self.project.get_addon('osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get('{}osfstorage/'.format(self.private_url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_returns_osf_storage_folder_version_two_point_two(self):
fobj = self.project.get_addon('osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get('{}osfstorage/?version=2.2'.format(self.private_url), auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_list_returns_folder_data(self):
fobj = self.project.get_addon('osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get('{}osfstorage/'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFolder')
def test_returns_folder_data(self):
fobj = self.project.get_addon('osfstorage').get_root().append_folder('NewFolder')
fobj.save()
res = self.app.get('{}osfstorage/{}/'.format(self.private_url, fobj._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 0)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_returns_private_files_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_returns_private_files_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes']['provider'], 'osfstorage')
def test_returns_private_files_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_returns_addon_folders(self):
user_auth = Auth(self.user)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes']['provider'], 'osfstorage')
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {oauth_settings._id: []}
addon.user_settings.save()
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
providers = [item['attributes']['provider'] for item in data]
assert_equal(len(data), 2)
assert_in('github', providers)
assert_in('osfstorage', providers)
def test_returns_node_files_list(self):
self._prepare_mock_wb_response(provider='github', files=[{'name': 'NewFile'}])
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data'][0]['attributes']['name'], 'NewFile')
assert_equal(res.json['data'][0]['attributes']['provider'], 'github')
def test_returns_node_file(self):
self._prepare_mock_wb_response(provider='github', files=[{'name': 'NewFile'}], folder=False, path='/file')
self.add_github()
url = '/{}nodes/{}/files/github/file'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, headers={
            'COOKIE': 'foo=bar;'  # WebTest doesn't support cookies?
})
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['name'], 'NewFile')
assert_equal(res.json['data']['attributes']['provider'], 'github')
def test_notfound_node_file_returns_folder(self):
self._prepare_mock_wb_response(provider='github', files=[{'name': 'NewFile'}], path='/file')
url = '/{}nodes/{}/files/github/file'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True, headers={
            'COOKIE': 'foo=bar;'  # WebTest doesn't support cookies?
})
assert_equal(res.status_code, 404)
def test_notfound_node_folder_returns_file(self):
self._prepare_mock_wb_response(provider='github', files=[{'name': 'NewFile'}], folder=False, path='/')
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True, headers={
            'COOKIE': 'foo=bar;'  # WebTest doesn't support cookies?
})
assert_equal(res.status_code, 404)
def test_waterbutler_server_error_returns_503(self):
self._prepare_mock_wb_response(status_code=500)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True, headers={
            'COOKIE': 'foo=bar;'  # WebTest doesn't support cookies?
})
assert_equal(res.status_code, 503)
def test_waterbutler_invalid_data_returns_503(self):
wb_url = waterbutler_api_url_for(self.project._id, provider='github', path='/', meta=True)
self.add_github()
httpretty.register_uri(
httpretty.GET,
wb_url,
body=json.dumps({}),
status=400
)
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 503)
def test_handles_unauthenticated_waterbutler_request(self):
self._prepare_mock_wb_response(status_code=401)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_handles_notfound_waterbutler_request(self):
invalid_provider = 'gilkjadsflhub'
self._prepare_mock_wb_response(status_code=404, provider=invalid_provider)
url = '/{}nodes/{}/files/{}/'.format(API_BASE, self.project._id, invalid_provider)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
def test_handles_request_to_provider_not_configured_on_project(self):
provider = 'box'
url = '/{}nodes/{}/files/{}/'.format(API_BASE, self.project._id, provider)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_false(self.project.get_addon(provider))
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'The {} provider is not configured for this project.'.format(provider))
def test_handles_bad_waterbutler_request(self):
wb_url = waterbutler_api_url_for(self.project._id, provider='github', path='/', meta=True)
httpretty.register_uri(
httpretty.GET,
wb_url,
body=json.dumps({}),
status=418
)
self.add_github()
url = '/{}nodes/{}/files/github/'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 503)
assert_in('detail', res.json['errors'][0])
def test_files_list_contains_relationships_object(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert 'relationships' in res.json['data'][0]
class TestNodeFilesListFiltering(ApiTestCase):
def setUp(self):
super(TestNodeFilesListFiltering, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
httpretty.enable()
# Prep HTTP mocks
prepare_mock_wb_response(
node=self.project,
provider='github',
files=[
{'name': 'abc', 'path': '/abc/', 'materialized': '/abc/', 'kind': 'folder'},
{'name': 'xyz', 'path': '/xyz', 'materialized': '/xyz', 'kind': 'file'},
]
)
def tearDown(self):
super(TestNodeFilesListFiltering, self).tearDown()
httpretty.disable()
httpretty.reset()
def add_github(self):
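        # Helper: wire a GitHub addon (with a factory-created OAuth account)
        # onto the project so the mocked WaterButler 'github' listing can be queried.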
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {oauth_settings._id: []}
addon.user_settings.save()
def test_node_files_are_filterable_by_name(self):
url = '/{}nodes/{}/files/github/?filter[name]=xyz'.format(API_BASE, self.project._id)
self.add_github()
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'abc'
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
def test_node_files_filter_by_name_case_insensitive(self):
url = '/{}nodes/{}/files/github/?filter[name]=XYZ'.format(API_BASE, self.project._id)
self.add_github()
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'abc', but finds 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'xyz')
def test_node_files_are_filterable_by_path(self):
url = '/{}nodes/{}/files/github/?filter[path]=abc'.format(API_BASE, self.project._id)
self.add_github()
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
def test_node_files_are_filterable_by_kind(self):
url = '/{}nodes/{}/files/github/?filter[kind]=folder'.format(API_BASE, self.project._id)
self.add_github()
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1) # filters out 'xyz'
assert_equal(res.json['data'][0]['attributes']['name'], 'abc')
def test_node_files_external_provider_can_filter_by_last_touched(self):
yesterday_stamp = timezone.now() - datetime.timedelta(days=1)
self.add_github()
url = '/{}nodes/{}/files/github/?filter[last_touched][gt]={}'.format(API_BASE,
self.project._id,
yesterday_stamp.isoformat())
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
def test_node_files_osfstorage_cannot_filter_by_last_touched(self):
yesterday_stamp = timezone.now() - datetime.timedelta(days=1)
self.file = api_utils.create_test_file(self.project, self.user)
url = '/{}nodes/{}/files/osfstorage/?filter[last_touched][gt]={}'.format(API_BASE,
self.project._id,
yesterday_stamp.isoformat())
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
class TestNodeFilesListPagination(ApiTestCase):
def setUp(self):
super(TestNodeFilesListPagination, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
httpretty.enable()
def tearDown(self):
super(TestNodeFilesListPagination, self).tearDown()
httpretty.disable()
httpretty.reset()
def add_github(self):
user_auth = Auth(self.user)
self.project.add_addon('github', auth=user_auth)
addon = self.project.get_addon('github')
addon.repo = 'something'
addon.user = 'someone'
oauth_settings = GitHubAccountFactory()
oauth_settings.save()
self.user.add_addon('github')
self.user.external_accounts.add(oauth_settings)
self.user.save()
addon.user_settings = self.user.get_addon('github')
addon.external_account = oauth_settings
addon.save()
self.project.save()
addon.user_settings.oauth_grants[self.project._id] = {oauth_settings._id: []}
addon.user_settings.save()
def check_file_order(self, resp):
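        # The mocked filenames are numeric strings; assert the API returns them
        # in strictly ascending order.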
previous_file_name = 0
for file in resp.json['data']:
int_file_name = int(file['attributes']['name'])
assert int_file_name > previous_file_name, 'Files were not in order'
previous_file_name = int_file_name
def test_node_files_are_sorted_correctly(self):
prepare_mock_wb_response(
node=self.project,
provider='github',
files=[
{'name': '01', 'path': '/01/', 'materialized': '/01/', 'kind': 'folder'},
{'name': '02', 'path': '/02', 'materialized': '/02', 'kind': 'file'},
{'name': '03', 'path': '/03/', 'materialized': '/03/', 'kind': 'folder'},
{'name': '04', 'path': '/04', 'materialized': '/04', 'kind': 'file'},
{'name': '05', 'path': '/05/', 'materialized': '/05/', 'kind': 'folder'},
{'name': '06', 'path': '/06', 'materialized': '/06', 'kind': 'file'},
{'name': '07', 'path': '/07/', 'materialized': '/07/', 'kind': 'folder'},
{'name': '08', 'path': '/08', 'materialized': '/08', 'kind': 'file'},
{'name': '09', 'path': '/09/', 'materialized': '/09/', 'kind': 'folder'},
{'name': '10', 'path': '/10', 'materialized': '/10', 'kind': 'file'},
{'name': '11', 'path': '/11/', 'materialized': '/11/', 'kind': 'folder'},
{'name': '12', 'path': '/12', 'materialized': '/12', 'kind': 'file'},
{'name': '13', 'path': '/13/', 'materialized': '/13/', 'kind': 'folder'},
{'name': '14', 'path': '/14', 'materialized': '/14', 'kind': 'file'},
{'name': '15', 'path': '/15/', 'materialized': '/15/', 'kind': 'folder'},
{'name': '16', 'path': '/16', 'materialized': '/16', 'kind': 'file'},
{'name': '17', 'path': '/17/', 'materialized': '/17/', 'kind': 'folder'},
{'name': '18', 'path': '/18', 'materialized': '/18', 'kind': 'file'},
{'name': '19', 'path': '/19/', 'materialized': '/19/', 'kind': 'folder'},
{'name': '20', 'path': '/20', 'materialized': '/20', 'kind': 'file'},
{'name': '21', 'path': '/21/', 'materialized': '/21/', 'kind': 'folder'},
{'name': '22', 'path': '/22', 'materialized': '/22', 'kind': 'file'},
{'name': '23', 'path': '/23/', 'materialized': '/23/', 'kind': 'folder'},
{'name': '24', 'path': '/24', 'materialized': '/24', 'kind': 'file'},
]
)
self.add_github()
url = '/{}nodes/{}/files/github/?page[size]=100'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth)
self.check_file_order(res)
class TestNodeProviderDetail(ApiTestCase):
def setUp(self):
super(TestNodeProviderDetail, self).setUp()
self.user = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True)
self.private_project = ProjectFactory(creator=self.user)
self.public_url = '/{}nodes/{}/files/providers/osfstorage/'.format(API_BASE, self.public_project._id)
self.private_url = '/{}nodes/{}/files/providers/osfstorage/'.format(API_BASE, self.private_project._id)
def test_can_view_if_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '{}:osfstorage'.format(self.private_project._id))
def test_can_view_if_public(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '{}:osfstorage'.format(self.public_project._id))
def test_cannot_view_if_private(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
|
|
import inspect
from abc import ABCMeta
from collections import OrderedDict
from functools import wraps
from operator import attrgetter
from types import MethodType
import six
from six import add_metaclass
from theano import tensor
from theano.gof import Variable
from blocks.graph import add_annotation, Annotation
from blocks.roles import add_role, PARAMETER, INPUT, OUTPUT
from blocks.utils import dict_union, pack, repr_attrs, reraise_as, unpack
from blocks.utils.containers import AnnotatingList
def create_unbound_method(func, cls):
"""Create an unbounded method from a function and a class.
Notes
-----
See https://bitbucket.org/gutworth/six/pull-request/64.
"""
if six.PY2:
return MethodType(func, None, cls)
if six.PY3:
return func
# Rename built-in property to avoid conflict with Application.property
property_ = property
class Parameters(AnnotatingList):
"""Adds the PARAMETER role to parameters automatically."""
def __init__(self, brick, *args, **kwargs):
self.brick = brick
super(Parameters, self).__init__(*args, **kwargs)
def _setitem(self, key, value):
if isinstance(value, Variable):
add_role(value, PARAMETER)
add_annotation(value, self.brick)
def __getattr__(self, name):
if name == '_items':
raise AttributeError
for l in self._items:
if getattr(l, 'name', None) == name:
return l
raise AttributeError
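# Note that Parameters also supports attribute-style lookup by variable name
# (see __getattr__ above): ``some_brick.parameters.W`` returns the shared
# variable whose name is 'W', if such a parameter exists.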
class Children(AnnotatingList):
"""Adds the brick to the list of parents of its children."""
def __init__(self, brick, *args, **kwargs):
self.brick = brick
super(Children, self).__init__(*args, **kwargs)
def _setitem(self, key, value):
if value is not None:
value.parents.append(self.brick)
def _delitem(self, key):
child = self._items[key]
if child is not None:
child.parents.remove(self.brick)
class Application(object):
"""An application method belonging to a particular type of brick.
The application methods of each :class:`Brick` class are automatically
replaced by an instance of :class:`Application`. This allows us to
store metadata about particular application methods (such as their in-
and outputs) easily.
Attributes
----------
application : callable
        The original (unbound) application function defined on the
:class:`Brick`.
delegate_function : callable
A function that takes a :class:`Brick` instance as an argument and
returns a :class:`BoundApplication` object to which attribute
requests should be routed.
properties : :obj:`dict` (:obj:`str`, :obj:`callable`)
A dictionary of property getters that should be called when an
attribute with the given name is requested.
instances : :obj:`dict` (:class:`Brick`, :class:`BoundApplication`)
A record of bound application instances created by the descriptor
protocol.
call_stack : :obj:`list` of :class:`Brick`
The call stack of brick application methods. Used to check whether
the current call was made by a parent brick.
brick : type
The brick class to which this instance belongs.
Raises
------
ValueError
If a brick's application method is applied by another brick which
does not list the former as a child.
ValueError
If the application method's inputs and/or outputs don't match with
the function signature or the values returned (respectively).
Notes
-----
When a :class:`Brick` is instantiated and its application method (i.e.
an instance of this class) requested, the descriptor protocol (through
the :meth:`__get__` method) automatically instantiates a
:class:`BoundApplication` class and returns this. This bound
application class can be used to store application information
    particular to a brick instance. Any attributes unknown to the bound
application are automatically routed to the application that
instantiated it.
"""
call_stack = []
def __init__(self, application_function):
self.__doc__ = application_function.__doc__
self._application_function = application_function
self.application_name = application_function.__name__
self.delegate_function = None
self.properties = {}
@property
def application_function(self):
if hasattr(self, '_application_function'):
return self._application_function
return getattr(self.brick, '_' + self.application_name)
def property(self, name):
"""Decorator to make application properties.
Parameters
----------
name : str
The name the property should take.
Examples
--------
>>> class Foo(Brick):
... @application
... def apply(self, x):
... return x + 1
...
... @apply.property('inputs')
... def apply_inputs(self):
... return ['foo', 'bar']
>>> foo = Foo()
>>> foo.apply.inputs
['foo', 'bar']
"""
if not isinstance(name, six.string_types):
raise ValueError
def wrap_property(application_property):
self.properties[name] = application_property.__name__
return application_property
return wrap_property
def delegate(self, f):
"""Decorator to assign a delegate application.
An application method can assign a delegate application. Whenever
an attribute is not available, it will be requested from the
delegate instead.
Examples
--------
>>> class Foo(Brick):
... @application(outputs=['baz'])
... def apply(self, x):
... return x + 1
...
... @apply.property('inputs')
... def apply_inputs(self):
... return ['foo', 'bar']
>>> class Bar(Brick):
... def __init__(self, foo):
... self.foo = foo
...
... @application(outputs=['foo'])
... def apply(self, x):
... return x + 1
...
... @apply.delegate
... def apply_delegate(self):
... return self.foo.apply
>>> foo = Foo()
>>> bar = Bar(foo)
>>> bar.apply.outputs
['foo']
>>> bar.apply.inputs
['foo', 'bar']
"""
self.delegate_function = f.__name__
return f
def __get__(self, instance, owner):
"""Instantiate :class:`BoundApplication` for each :class:`Brick`."""
if instance is None:
return self
if not hasattr(instance, "_bound_applications"):
instance._bound_applications = {}
key = "{}.{}".format(self.brick.__name__, self.application_name)
return instance._bound_applications.setdefault(
key, BoundApplication(self, instance))
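    # Bound applications are cached on the brick instance, keyed by the
    # defining brick class and the application name, so repeated attribute
    # access returns the same BoundApplication object.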
def __getattr__(self, name):
# Mimic behavior of properties
if 'properties' in self.__dict__ and name in self.properties:
return property(create_unbound_method(
getattr(self, self.properties[name]), self.brick))
raise AttributeError
def __setattr__(self, name, value):
# Mimic behavior of read-only properties
if 'properties' in self.__dict__ and name in self.properties:
raise AttributeError("can't set attribute")
super(Application, self).__setattr__(name, value)
@property_
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, inputs):
args_names, varargs_name, _, _ = inspect.getargspec(
self.application_function)
if not all(input_ in args_names + [varargs_name] for input_ in inputs):
raise ValueError("Unexpected inputs")
self._inputs = inputs
@property_
def name(self):
return self.application_name
def __call__(self, brick, *args, **kwargs):
if not isinstance(brick, Brick) and six.PY2:
raise TypeError
bound_application = self.__get__(brick, brick.__class__)
return self.apply(bound_application, *args, **kwargs)
def apply(self, bound_application, *args, **kwargs):
as_dict = kwargs.pop('as_dict', False)
as_list = kwargs.pop('as_list', False)
if as_list and as_dict:
raise ValueError
brick = bound_application.brick
# Find the names of the inputs to the application method
args_names, varargs_name, _, _ = inspect.getargspec(
self.application_function)
args_names = args_names[1:]
# Construct the ApplicationCall, used to store data in for this call
call = ApplicationCall(bound_application)
args = list(args)
if 'application' in args_names:
args.insert(args_names.index('application'), bound_application)
if 'application_call' in args_names:
args.insert(args_names.index('application_call'), call)
# Allocate before applying, and optionally initialize
if not brick.allocated:
brick.allocate()
# Annotate all the input variables which are Theano variables
def copy_and_tag(variable, role, name):
"""Helper method to copy a variable and annotate it."""
copy = variable.copy()
# Theano name
copy.name = _variable_name(brick.name, self.name, name)
add_annotation(copy, brick)
add_annotation(copy, call)
# Blocks name
copy.tag.name = name
add_role(copy, role)
return copy
for i, input_ in enumerate(args):
if isinstance(input_, tensor.Variable):
if i < len(args_names):
name = args_names[i]
else:
name = "{}_{}".format(varargs_name, i - len(args_names))
args[i] = copy_and_tag(input_, INPUT, name)
for name, input_ in kwargs.items():
if isinstance(input_, tensor.Variable):
kwargs[name] = copy_and_tag(input_, INPUT, name)
# Run the application method on the annotated variables
last_brick = self.call_stack[-1] if self.call_stack else None
if (last_brick and brick is not last_brick and
brick not in last_brick.children):
raise ValueError('Brick ' + str(self.call_stack[-1]) + ' tries '
'to call brick ' + str(self.brick) + ' which '
'is not in the list of its children. This could '
                             'be because an @application decorator is '
'missing.')
self.call_stack.append(brick)
try:
outputs = self.application_function(brick, *args, **kwargs)
outputs = pack(outputs)
finally:
self.call_stack.pop()
# Rename and annotate output variables
for i, output in enumerate(outputs):
if isinstance(output, tensor.Variable):
try:
name = bound_application.outputs[i]
except AttributeError:
name = "output_{}".format(i)
except IndexError:
reraise_as(ValueError("Unexpected outputs"))
# TODO Tag with dimensions, axes, etc. for error-checking
outputs[i] = copy_and_tag(outputs[i],
OUTPUT, name)
# Return values
if as_list:
return outputs
if as_dict:
return OrderedDict(zip(bound_application.outputs, outputs))
return unpack(outputs)
# Application instances are used instead of usual methods in bricks.
# The usual methods are not pickled per-se, similarly to classes
# and modules. Instead, a reference to the method is put into the pickle.
# Here, we ensure the same behaviour for Application instances.
def __reduce__(self):
return (getattr, (self.brick, self.application_name))
class BoundApplication(object):
"""An application method bound to a :class:`Brick` instance."""
def __init__(self, application, brick):
self.application = application
self.brick = brick
def __getattr__(self, name):
# Prevent infinite loops
if name == 'application':
raise AttributeError
# These always belong to the parent (the unbound application)
if name in ('delegate_function', 'properties'):
return getattr(self.application, name)
if name in self.properties.values():
return getattr(self.application.brick, name)
if name in self.properties:
return getattr(self, self.properties[name])(self.brick)
# First try the parent (i.e. class level), before trying the delegate
try:
return getattr(self.application, name)
except AttributeError:
if self.delegate_function:
return getattr(getattr(self.brick,
self.delegate_function)(),
name)
raise
@property
def name(self):
return self.application.name
def __call__(self, *args, **kwargs):
return self.application.apply(self, *args, **kwargs)
def rename_function(function, new_name):
old_name = function.__name__
function.__name__ = new_name
if six.PY3:
function.__qualname__ = \
function.__qualname__[:-len(old_name)] + new_name
return function
class _Brick(ABCMeta):
"""Metaclass which attaches brick instances to the applications.
    In addition, picklability of :class:`Application` objects is ensured.
    This means that :class:`Application` objects cannot be added to a
    brick class after it is created. To allow adding application methods
    programmatically, the following hook is supported: the class namespace
    is searched for a `decorators` attribute, which can contain a
    list of functions to be applied to the namespace of the class being
    created. These functions can arbitrarily modify this namespace.
"""
def __new__(mcs, name, bases, namespace):
decorators = namespace.get('decorators', [])
for decorator in decorators:
decorator(mcs, name, bases, namespace)
for attr in list(namespace.values()):
if (isinstance(attr, Application) and
hasattr(attr, '_application_function')):
namespace['_' + attr.application_name] = \
rename_function(attr._application_function,
'_' + attr.application_name)
del attr._application_function
brick = super(_Brick, mcs).__new__(mcs, name, bases, namespace)
for attr in namespace.values():
if isinstance(attr, Application):
attr.brick = brick
return brick
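# A minimal sketch of the `decorators` hook described in the docstring above
# (the helper `add_ping` and the brick are hypothetical):
#
#     def add_ping(mcs, name, bases, namespace):
#         @application
#         def ping(self):
#             return 'pong'
#         namespace['ping'] = ping
#
#     class PingBrick(Brick):
#         decorators = [add_ping]
#
#     PingBrick().ping()  # -> 'pong'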
@add_metaclass(_Brick)
class Brick(Annotation):
"""A brick encapsulates Theano operations with parameters.
A brick goes through the following stages:
1. Construction: The call to :meth:`__init__` constructs a
:class:`Brick` instance with a name and creates any child bricks as
well.
2. Allocation of parameters:
a) Allocation configuration of children: The
:meth:`push_allocation_config` method configures any children of
          this brick.
b) Allocation: The :meth:`allocate` method allocates the shared
Theano variables required for the parameters. Also allocates
parameters for all children.
3. The following can be done in either order:
a) Application: By applying the brick to a set of Theano
variables a part of the computational graph of the final model is
constructed.
b) The initialization of parameters:
1. Initialization configuration of children: The
:meth:`push_initialization_config` method configures any
             children of this brick.
2. Initialization: This sets the initial values of the
parameters by a call to :meth:`initialize`, which is needed
to call the final compiled Theano function. Also initializes
all children.
Not all stages need to be called explicitly. Step 3(a) will
automatically allocate the parameters if needed. Similarly, step
3(b.2) and 2(b) will automatically perform steps 3(b.1) and 2(a) if
needed. They only need to be called separately if greater control is
required. The only two methods which always need to be called are an
application method to construct the computational graph, and the
:meth:`initialize` method in order to initialize the parameters.
At each different stage, a brick might need a certain set of
configuration settings. All of these settings can be passed to the
:meth:`__init__` constructor. However, by default many bricks support
*lazy initialization*. This means that the configuration settings can
be set later.
.. note::
Some arguments to :meth:`__init__` are *always* required, even when
lazy initialization is enabled. Other arguments must be given before
calling :meth:`allocate`, while others yet only need to be given in
order to call :meth:`initialize`. Always read the documentation of
each brick carefully.
Lazy initialization can be turned off by setting ``Brick.lazy =
False``. In this case, there is no need to call :meth:`initialize`
manually anymore, but all the configuration must be passed to the
:meth:`__init__` method.
Parameters
----------
name : str, optional
The name of this brick. This can be used to filter the application
of certain modifications by brick names. By default, the brick
receives the name of its class (lowercased).
Attributes
----------
name : str
The name of this brick.
print_shapes : bool
``False`` by default. If ``True`` it logs the shapes of all the
input and output variables, which can be useful for debugging.
parameters : list of :class:`~tensor.TensorSharedVariable` and ``None``
After calling the :meth:`allocate` method this attribute will be
populated with the shared variables storing this brick's
parameters. Allows for ``None`` so that parameters can always be
accessed at the same index, even if some parameters are only
defined given a particular configuration.
children : list of bricks
The children of this brick.
allocated : bool
``False`` if :meth:`allocate` has not been called yet. ``True``
otherwise.
initialized : bool
        ``False`` if :meth:`initialize` has not been called yet. ``True``
otherwise.
allocation_config_pushed : bool
``False`` if :meth:`allocate` or :meth:`push_allocation_config`
hasn't been called yet. ``True`` otherwise.
initialization_config_pushed : bool
``False`` if :meth:`initialize` or
:meth:`push_initialization_config` hasn't been called yet. ``True``
otherwise.
Notes
-----
To provide support for lazy initialization, apply the :meth:`lazy`
decorator to the :meth:`__init__` method.
Brick implementations *must* call the :meth:`__init__` constructor of
their parent using `super(BlockImplementation,
self).__init__(**kwargs)` at the *beginning* of the overriding
`__init__`.
The methods :meth:`_allocate` and :meth:`_initialize` need to be
overridden if the brick needs to allocate shared variables and
initialize their values in order to function.
A brick can have any number of methods which apply the brick on Theano
variables. These methods should be decorated with the
:func:`application` decorator.
If a brick has children, they must be listed in the :attr:`children`
attribute. Moreover, if the brick wants to control the configuration of
its children, the :meth:`_push_allocation_config` and
:meth:`_push_initialization_config` methods need to be overridden.
Examples
--------
Most bricks have lazy initialization enabled.
>>> import theano
>>> from blocks.initialization import IsotropicGaussian, Constant
>>> from blocks.bricks import Linear
>>> linear = Linear(input_dim=5, output_dim=3,
... weights_init=IsotropicGaussian(),
... biases_init=Constant(0))
>>> x = theano.tensor.vector()
>>> linear.apply(x) # Calls linear.allocate() automatically
linear_apply_output
>>> linear.initialize() # Initializes the weight matrix
"""
#: See :attr:`Brick.print_shapes`
print_shapes = False
def __init__(self, name=None):
if name is None:
name = self.__class__.__name__.lower()
self.name = name
self.children = []
self.parents = []
self.allocated = False
self.allocation_config_pushed = False
self.initialized = False
self.initialization_config_pushed = False
super(Brick, self).__init__()
def __repr__(self):
return repr_attrs(self, 'name')
@property
def parameters(self):
return self._parameters
@parameters.setter
def parameters(self, value):
self._parameters = Parameters(self, value)
@property
def children(self):
return self._children
@children.setter
def children(self, value):
self._children = Children(self, value)
def allocate(self):
"""Allocate shared variables for parameters.
Based on the current configuration of this :class:`Brick` create
Theano shared variables to store the parameters. After allocation,
parameters are accessible through the :attr:`parameters` attribute.
This method calls the :meth:`allocate` method of all children
first, allowing the :meth:`_allocate` method to override the
parameters of the children if needed.
Raises
------
ValueError
If the configuration of this brick is insufficient to determine
the number of parameters or their dimensionality to be
initialized.
Notes
-----
This method sets the :attr:`parameters` attribute to an empty list.
This is in order to ensure that calls to this method completely
reset the parameters.
"""
if hasattr(self, 'allocation_args'):
missing_config = [arg for arg in self.allocation_args
if getattr(self, arg) is NoneAllocation]
if missing_config:
raise ValueError('allocation config not set: '
'{}'.format(', '.join(missing_config)))
if not self.allocation_config_pushed:
self.push_allocation_config()
for child in self.children:
child.allocate()
self.parameters = []
self._allocate()
self.allocated = True
def _allocate(self):
"""Brick implementation of parameter initialization.
Implement this if your brick needs to allocate its parameters.
.. warning::
This method should never be called directly. Call
:meth:`initialize` instead.
"""
pass
def initialize(self):
"""Initialize parameters.
        Initialize parameters, such as weight matrices and biases.
Notes
-----
If the brick has not allocated its parameters yet, this method will
call the :meth:`allocate` method in order to do so.
"""
if hasattr(self, 'initialization_args'):
missing_config = [arg for arg in self.initialization_args
if getattr(self, arg) is NoneInitialization]
if missing_config:
raise ValueError('initialization config not set: '
'{}'.format(', '.join(missing_config)))
if not self.allocated:
self.allocate()
if not self.initialization_config_pushed:
self.push_initialization_config()
for child in self.children:
child.initialize()
self._initialize()
self.initialized = True
def _initialize(self):
"""Brick implementation of parameter initialization.
Implement this if your brick needs to initialize its parameters.
.. warning::
This method should never be called directly. Call
:meth:`initialize` instead.
"""
pass
def push_allocation_config(self):
"""Push the configuration for allocation to child bricks.
Bricks can configure their children, based on their own current
configuration. This will be automatically done by a call to
:meth:`allocate`, but if you want to override the configuration of
child bricks manually, then you can call this function manually.
"""
self._push_allocation_config()
self.allocation_config_pushed = True
for child in self.children:
try:
child.push_allocation_config()
except Exception:
self.allocation_config_pushed = False
raise
def _push_allocation_config(self):
"""Brick implementation of configuring child before allocation.
Implement this if your brick needs to set the configuration of its
children before allocation.
.. warning::
This method should never be called directly. Call
:meth:`push_allocation_config` instead.
"""
pass
def push_initialization_config(self):
"""Push the configuration for initialization to child bricks.
Bricks can configure their children, based on their own current
configuration. This will be automatically done by a call to
:meth:`initialize`, but if you want to override the configuration
of child bricks manually, then you can call this function manually.
"""
self._push_initialization_config()
self.initialization_config_pushed = True
for child in self.children:
try:
child.push_initialization_config()
except Exception:
self.initialization_config_pushed = False
raise
def _push_initialization_config(self):
"""Brick implementation of configuring child before initialization.
Implement this if your brick needs to set the configuration of its
children before initialization.
.. warning::
This method should never be called directly. Call
:meth:`push_initialization_config` instead.
"""
pass
def get_dim(self, name):
"""Get dimension of an input/output variable of a brick.
Parameters
----------
name : str
The name of the variable.
"""
raise ValueError("No dimension information for {} available"
.format(name))
def get_dims(self, names):
"""Get list of dimensions for a set of input/output variables.
Parameters
----------
names : list
The variable names.
Returns
-------
dims : list
The dimensions of the sources.
"""
return [self.get_dim(name) for name in names]
def get_unique_path(self):
"""Returns unique path to this brick in the application graph."""
if self.parents:
parent = min(self.parents, key=attrgetter('name'))
return parent.get_unique_path() + [self]
else:
return [self]
def args_to_kwargs(args, f):
arg_names, vararg_names, _, _ = inspect.getargspec(f)
return dict((arg_name, arg) for arg_name, arg
in zip(arg_names + [vararg_names], args))
class LazyNone(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def __bool__(self):
return False
__nonzero__ = __bool__
NoneAllocation = LazyNone('NoneAllocation')
NoneInitialization = LazyNone('NoneInitialization')
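# Both sentinels are falsy and repr as their own names, so configuration that
# is still missing shows up clearly in the ``missing_config`` checks performed
# by Brick.allocate and Brick.initialize.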
def lazy(allocation=None, initialization=None):
"""Makes the initialization lazy.
This decorator allows the user to define positional arguments which
will not be needed until the allocation or initialization stage of the
brick. If these arguments are not passed, it will automatically replace
them with a custom ``None`` object. It is assumed that the missing
arguments can be set after initialization by setting attributes with
the same name.
Parameters
----------
allocation : list
A list of argument names that are needed for allocation.
initialization : list
A list of argument names that are needed for initialization.
Examples
--------
>>> class SomeBrick(Brick):
... @lazy(allocation=['a'], initialization=['b'])
... def __init__(self, a, b, c='c', d=None):
... print(a, b, c, d)
>>> brick = SomeBrick('a')
a NoneInitialization c None
>>> brick = SomeBrick(d='d', b='b')
NoneAllocation b c d
"""
if not allocation:
allocation = []
if not initialization:
initialization = []
def lazy_wrapper(init):
def lazy_init(*args, **kwargs):
self = args[0]
self.allocation_args = (getattr(self, 'allocation_args',
[]) + allocation)
self.initialization_args = (getattr(self, 'initialization_args',
[]) + initialization)
kwargs = dict_union(args_to_kwargs(args, init), kwargs)
for allocation_arg in allocation:
kwargs.setdefault(allocation_arg, NoneAllocation)
for initialization_arg in initialization:
kwargs.setdefault(initialization_arg, NoneInitialization)
return init(**kwargs)
wraps(init)(lazy_init)
return lazy_init
return lazy_wrapper
class ApplicationCall(Annotation):
"""A link between the variable tags and bricks.
The application call can be used to attach to an apply call auxiliary
variables (e.g. monitors or regularizers) that do not form part of the
main computation graph.
The application call object is created before the call to the
application method and can be accessed by specifying an
application_call argument.
Also see :class:`.Annotation`.
Parameters
----------
application : :class:`BoundApplication` instance
The bound application (i.e. belong to a brick instance) object
being called
Examples
--------
>>> class Foo(Brick):
... @application
... def apply(self, x, application_call):
... application_call.add_auxiliary_variable(x.mean())
... return x + 1
>>> x = tensor.vector()
>>> y = Foo().apply(x)
>>> from blocks.filter import get_application_call
>>> get_application_call(y) # doctest: +ELLIPSIS
<blocks.bricks.base.ApplicationCall object at ...>
"""
def __init__(self, application):
self.application = application
self.metadata = {}
super(ApplicationCall, self).__init__()
def add_auxiliary_variable(self, variable, roles=None, name=None):
if name:
variable.name = _variable_name(
self.application.brick.name, self.application.name, name)
variable.tag.name = name
name = None
add_annotation(variable, self.application.brick)
return super(ApplicationCall, self).add_auxiliary_variable(
variable, roles, name)
def application(*args, **kwargs):
r"""Decorator for methods that apply a brick to inputs.
Parameters
----------
\*args, optional
The application method to wrap.
\*\*kwargs, optional
Attributes to attach to this application.
Notes
-----
This decorator replaces application methods with :class:`Application`
instances. It also sets the attributes given as keyword arguments to
the decorator.
Note that this decorator purposely does not wrap the original method
using e.g. :func:`~functools.wraps` or
:func:`~functools.update_wrapper`, since that would make the class
impossible to pickle (see notes at :class:`Application`).
Examples
--------
>>> class Foo(Brick):
... @application(inputs=['x'], outputs=['y'])
... def apply(self, x):
... return x + 1
... @application
... def other_apply(self, x):
... return x - 1
>>> foo = Foo()
>>> Foo.apply.inputs
['x']
>>> foo.apply.outputs
['y']
>>> Foo.other_apply # doctest: +ELLIPSIS
<blocks.bricks.base.Application object at ...>
"""
if not ((args and not kwargs) or (not args and kwargs)):
raise ValueError
if args:
application_function, = args
application = Application(application_function)
return application
else:
def wrap_application(application_function):
application = Application(application_function)
for key, value in kwargs.items():
setattr(application, key, value)
return application
return wrap_application
def _variable_name(brick_name, application_name, name):
return "{}_{}_{}".format(brick_name, application_name, name)
|
|
#!/usr/bin/env python
import praw # pip install praw
import humanfriendly # pip install humanfriendly
import imgurpython # pip install imgurpython
import argparse
from time import sleep
import socket
import warnings
import os
import os.path
import sys
from imgurpython.helpers.error import ImgurClientError
import requests
import pprint
import urllib2
from bs4 import BeautifulSoup
import datetime
__author__ = 'scottkuma'
#INSERT YOUR API INFO HERE
REDDIT_CLIENT_ID = ''
REDDIT_CLIENT_SECRET = ''
REDDIT_USERNAME=''
REDDIT_PASSWORD=''
IMGUR_CLIENT_ID = ''
IMGUR_CLIENT_SECRET = ''
# TODO: make a requirements.txt file to auto-install requirements
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = argparse.ArgumentParser(description='Scrape images from a Subreddit',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('subreddit')
parser.add_argument('--basedir', '-b',
help="Base directory to save files to - will be appended with a subreddit directory.\
                        Defaults to the current working directory.",
default=os.getcwd())
parser.add_argument('-t', '--threshold',
metavar='T',
help="Reject posts with less than this # of upvotes",
default=0,
type=int)
parser.add_argument('--timeout', '--to',
help="Stop attempting to connect to a submission's URL after X seconds",
metavar='X',
default=10,
type=int)
parser.add_argument('--sleep',
help="# of seconds to sleep between attempts to scrape subreddit",
metavar='S',
default=300,
type=int)
parser.add_argument('--limit',
help="Max # of submissions to retrieve",
default=100,
type=int)
parser.add_argument('--albumthreshold',
help="Above this #, will download into subdirectories",
default=5,
type=int)
parser.add_argument('--iterate',
help="Iterate over group endlessly",
action='store_true')
post_types = ['hot', 'new', 'rising', 'controversial', 'top']
parser.add_argument('--type', '-p',
help="Fetch a certain type of reddit submissions",
choices=post_types,
default='new')
time_periods = ['hour', 'day', 'week', 'month', 'year', 'all']
parser.add_argument('--period',
help='Limit the time period for submissions. Only affects "top" and "controversial" requests.',
choices=time_periods,
default="all")
# human-parseable filesize limits.
parser.add_argument('--maxsize',
                    help='Do not download files larger than this size (accepts human-friendly values such as "30M").',
default="30M")
group = parser.add_mutually_exclusive_group()
group.add_argument('--multireddit', '-m',
help="Download from the user's multireddit",
action='store_true')
group.add_argument('--search', '-s',
help="Search for a string in reddit",
action='store_true')
args = parser.parse_args()
# Create necessary variables for script
# Delete any trailing slashes
if args.subreddit[-1] == "/":
args.subreddit = args.subreddit[:-1]
save_dir = args.basedir + "/" + args.subreddit + "/"
already_done = [] # variable to hold reddit submission IDs that were parsed
# Trying to prevent work being done twice...
parsed = 0 # number of URLs parsed
imgur_api_call_count = 0 # tracking variable for # of calls to imgur API
KEEP_CHARACTERS = (' ', '.', '_') # characters (other than alphanumeric) to keep in filenames
filetypes = ['jpg', 'jpeg', 'png', 'webm', 'gif'] # file types to download
saved = 0
MAX_SIZE = humanfriendly.parse_size(args.maxsize)
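    # humanfriendly.parse_size turns strings such as "30M" into a byte count.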
sleeptime = int(args.sleep)
allurls = [['Title', 'URL', 'UTC Created Time', 'Subreddit Name', 'Permalink']]
if args.limit == 100:
print "Default limit of 100 URLs parsed!"
# set socket timeout
socket.setdefaulttimeout(args.timeout)
try:
# Create connection to reddit...
r = praw.Reddit(client_id=REDDIT_CLIENT_ID,
client_secret=REDDIT_CLIENT_SECRET,
username=REDDIT_USERNAME,
password=REDDIT_PASSWORD,
user_agent='SubRedditScraper v0.8')
me = r.user.me()
multis = {}
for m in me.multireddits():
thispath = m.path.split('/')[-1]
multis[thispath] = m
# Create API connection to IMGUR...
ic = imgurpython.ImgurClient(IMGUR_CLIENT_ID, IMGUR_CLIENT_SECRET)
try:
subreddit = ''
if args.multireddit:
if args.subreddit in multis:
subreddit = multis[args.subreddit]
else:
print "\n\n** ERROR: Multireddit {} does not exit for user {}.".format(args.subreddit, REDDIT_USERNAME)
sys.exit(0)
elif args.search:
subreddit = r.subreddit('all').search(args.subreddit, limit=args.limit)
else:
subreddit = r.subreddit(args.subreddit) # get_top_from_all
        except Exception:
print "\n\n** ERROR: Subreddit '{}' does not exist.".format(args.subreddit)
sys.exit(0)
d = os.path.dirname(save_dir)
if not os.path.exists(d):
os.makedirs(d)
iterate_once = True
while args.iterate or iterate_once:
iterate_once = False
if args.search:
submissions = subreddit
else:
if args.type == 'top':
print "*** Fetching TOP submissions over past {}...".format(args.period)
submissions = subreddit.top(args.period,limit=args.limit)
elif args.type == 'controversial':
print "*** Fetching CONTROVERSIAL submissions over past {}...".format(args.period)
submissions = subreddit.controversial(args.period,limit=args.limit)
elif args.type == 'hot':
print "*** Fetching HOT submissions..."
submissions = subreddit.hot(limit=args.limit)
elif args.type == 'rising':
print "*** Fetching RISING submissions..."
submissions = subreddit.rising(limit=args.limit)
else:
print "*** Fetching NEW submissions..."
submissions = subreddit.new(limit=args.limit)
for sub in submissions:
parsed += 1
#pprint.pprint(vars(sub))
#sys.exit(0)
if sub.score >= args.threshold and sub.id not in already_done:
print "\n"
print "-=" * 30 + "-"
print "Item # ", parsed, "/", args.limit
print "Title: ", sub.title.encode(encoding="UTF-8")
print "Item Score: ", sub.score, "\n"
url = sub.url
allurls.append([sub.title, url, datetime.datetime.fromtimestamp(sub.created_utc).strftime('%Y-%m-%d %H:%M:%S'), sub.subreddit_name_prefixed, u"https://www.reddit.com" + sub.permalink])
# Some sources provide more than one URL to parse...
# We'll store these in a list, which also gives us the
# ability to skip over sites that we can't parse yet.
urllist = []
# Some trailing slashes can cause problems for our little client.
# we must remove them.
if url[-1] == '/':
url = url[:-1]
# Detect Special Cases
if "imgur" in url and url.split('/')[-2] == 'a':
print "Downloading from imgur album..."
albumid = url.split('/')[-1].split('?')[0].split('#')[0]
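                        # The album id is the last path segment with any query
                        # string or fragment stripped off.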
try:
imagelist = ic.get_album_images(albumid)
print "# images found: " + str(len(imagelist))
if len(imagelist) > args.albumthreshold:
albumflag = True
else:
albumflag = False
for img in imagelist:
if albumflag:
urllist.append([img.link, albumid])
else:
print img.link
urllist.append([img.link])
except ImgurClientError as e:
print "Error Message:", e.error_message
print "Error code:", e.status_code
print "URL:", url
print "Continuing...."
imgur_api_call_count += 1
print len(urllist), "images found"
# need to remove anything after the image id, so...
# removing anything that follows a ? or #
elif "imgur" in url and url.split('.')[-1].split('?')[0].split('#')[0] not in filetypes:
print "SPECIAL SNOWFLAKE: URL: {}".format(url)
imageid = url.split('/')[-1].split('?')[0].split('#')[0].split('.')[0]
print "IMAGEID: {}".format(imageid)
try:
filetype = ic.get_image(imageid).type
imgur_api_call_count += 1
print "Trimmed file: Filetype: {}".format(filetype)
if filetype == 'image/jpeg':
url += '.jpg'
elif filetype == 'image/gif':
url += '.gif'
elif filetype == 'image/png':
url += '.png'
elif filetype == 'text':
url = url.split('.')[0]
url += '.gif'
else:
print "Filetype: {}".format(filetype)
print "-->", url
urllist.append([url])
except ImgurClientError as e:
print "Error Message:", e.error_message
print "Error code:", e.status_code
print "URL:", url
print "Continuing...."
# TODO: download giphy GIFs (need to work on this - may not ever work!)
elif "giphy" in url:
print "+" * 30, "GIPHY not implemented yet.... (skipping)"
# download gfycat GIFs
elif "gfycat" in url:
print url
try:
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
soup.prettify()
#print soup.find_all('source', {'type': "video/webm"})
for anchor in soup.find_all('source', {'type': "video/webm"}):
#print anchor['src']
if [anchor['src']] not in urllist:
urllist.append( [anchor['src']] )
except TypeError as e:
print "Could not find a webm link"
print "Continuing"
# download flickr pictures (Does not currently work, skips these photos)
elif "flickr" in url:
print "+" * 30, "FLICKR found.... (skipping)"
else:
# Not one of our special sites, so just append the URL to the list for download
urllist.append([url])
fileinURL = 0
for urlpair in urllist:
url = urlpair[0]
albumid = False
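                        # Build a filesystem-safe filename stem from the submission
                        # title: keep alphanumerics plus KEEP_CHARACTERS, then strip
                        # trailing whitespace.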
filedesc = "".join(c for c in sub.title if c.isalnum() or c in KEEP_CHARACTERS).rstrip()
filename = str(url).split('/')[-1]
if len(urlpair) > 1:
albumid = urlpair[1]
album_save_dir = save_dir + filedesc + " - " + albumid + "/"
ad = os.path.dirname(album_save_dir)
if not os.path.exists(ad):
print "MAKING DIRECTORY " + ad
os.makedirs(ad)
fileinURL += 1 # increment this counter
if len(urllist) > 1:
fullfilename = filedesc + " - {0:03d} - ".format(fileinURL) + filename
else:
fullfilename = filedesc + " - " + filename
if len(urlpair) > 1:
file_path = ad + "/" + fullfilename
else:
file_path = save_dir + fullfilename
if os.path.isfile(file_path.encode(encoding="UTF-8")):
print "** Skipping \"" + fullfilename + "\" - file already exists..."
else:
try:
response = requests.get(url, stream=True)
if response.headers.get('content-length') is None:
total_length = 0
else:
total_length = int(response.headers.get('content-length'))
print "TL: {}".format(total_length)
print "RH: {}".format(response.headers['content-type'].split('/')[0])
if total_length > 0 and total_length < MAX_SIZE and \
response.headers['content-type'].split('/')[0] in ['video', 'image']:
print "Saving to: \"{}\"".format(file_path.encode('utf-8'))
with open(file_path, 'wb') as out_file:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=1024):
out_file.write(data)
# Calculate & write progress bar to standard output.
dl += len(data)
done = int(30 * dl / total_length)
sys.stdout.write("\r[%s>%s] %s / %s " % (
'=' * (done - 1), ' ' * (30 - done), humanfriendly.format_size(dl, True),
humanfriendly.format_size(total_length, True)))
sys.stdout.flush()
print " "
# remove response object from memory to prevent leak.
del response
saved += 1
elif total_length >= MAX_SIZE:
print "Skipped - File length {} is greater than maximum size of {} bytes...".format(
total_length, MAX_SIZE)
print url
elif response.headers['content-type'].split('/')[0] not in ['video', 'image']:
print "Skipped - response type not \"image\""
print url
else:
print "Skipped - File is either not an image or 0 length..."
print url
except (IOError, UnicodeEncodeError) as e:
print "Unable to retrieve this URL!"
print url
print e
sleep(5)
pass
already_done.append(sub.id)
if args.iterate:
print "\n\n Statistics:"
print "-----------"
print parsed, "URLs parsed"
# print parsed - len(already_done), "URLs skipped"
print imgur_api_call_count, "calls made to IMGUR API."
print saved, "images saved to directory."
if args.iterate:
print "\n\nSubreddit iteration complete. Sleeping for {} seconds. **YAWN**".format(sleeptime)
sleep(args.sleep)
except KeyboardInterrupt:
print "\n\nCaught Keyboard Interrupt...ending gracefully."
finally:
print "\n\n Final Statistics:"
print "-----------------"
print parsed, "URLs parsed"
# print parsed - len(already_done), "URLs skipped"
print imgur_api_call_count, "calls made to IMGUR API."
print saved, "images saved to directory."
with open(save_dir +"__allurls.csv",'w') as outfile:
line = 0
for u in allurls:
line += 1
#print u
outstring = u"\"{}\", {}, {}, \"{}\", {}\n".format(u[0], u[1], u[2], u[3], u[4]).encode('UTF-8')
#print(outstring)
outfile.write(outstring)
|
|
import numpy as np
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_scalar
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.core.common as com
from pandas.core.frame import _shared_docs
from pandas.core.groupby import Grouper
from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
from pandas.core.reshape.concat import concat
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
# Note: We need to make sure `frame` is imported before `pivot`, otherwise
# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot_table'], indents=1)
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
fill_value=None, margins=False, dropna=True,
margins_name='All', observed=False):
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(data, values=values, index=index,
columns=columns,
fill_value=fill_value, aggfunc=func,
margins=margins, dropna=dropna,
margins_name=margins_name,
observed=observed)
pieces.append(table)
keys.append(getattr(func, '__name__', func))
return concat(pieces, keys=keys, axis=1)
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys, observed=observed)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how='all')
    # gh-21133: try to downcast back to the original integer dtype.  Grouping
    # with a NaN key that is subsequently dropped coerces integer values to
    # float, so restore the integer dtype where the values allow it.
for v in values:
if (v in data and is_integer_dtype(data[v]) and
v in agged and not is_integer_dtype(agged[v])):
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
if table.index.nlevels > 1:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[:len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
from pandas import MultiIndex
if table.index.nlevels > 1:
m = MultiIndex.from_arrays(cartesian_product(table.index.levels),
names=table.index.names)
table = table.reindex(m, axis=0)
if table.columns.nlevels > 1:
m = MultiIndex.from_arrays(cartesian_product(table.columns.levels),
names=table.columns.names)
table = table.reindex(m, axis=1)
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast='infer')
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(table, data, values, rows=index,
cols=columns, aggfunc=aggfunc,
observed=dropna,
margins_name=margins_name, fill_value=fill_value)
# discard the top level
if (values_passed and not values_multi and not table.empty and
(table.columns.nlevels > 1)):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how='all', axis=1)
return table
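# A minimal usage sketch of pivot_table above (illustrative only; assumes
# ``from pandas import DataFrame`` and made-up column names):
#
#     df = DataFrame({'A': ['x', 'x', 'y'], 'B': ['u', 'v', 'u'], 'C': [1, 2, 3]})
#     pivot_table(df, values='C', index='A', columns='B', aggfunc='sum',
#                 fill_value=0, margins=True)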
def _add_margins(table, data, values, rows, cols, aggfunc,
observed=None, margins_name='All', fill_value=None):
if not isinstance(margins_name, str):
raise ValueError('margins_name argument must be a string')
msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(msg)
grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)
# could be passed a Series object with no 'columns'
if hasattr(table, 'columns'):
for level in table.columns.names[1:]:
if margins_name in table.columns.get_level_values(level):
raise ValueError(msg)
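    # Label for the grand-margin row: pad with empty strings when the index is
    # a MultiIndex so the key has one entry per index level.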
if len(rows) > 1:
key = (margins_name,) + ('',) * (len(rows) - 1)
else:
key = margins_name
if not values and isinstance(table, ABCSeries):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
return table.append(Series({key: grand_margin[margins_name]}))
if values:
marginal_result_set = _generate_marginal_results(table, data, values,
rows, cols, aggfunc,
observed,
grand_margin,
margins_name)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
marginal_result_set = _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc, observed, margins_name)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
# populate grand margin
for k in margin_keys:
if isinstance(k, str):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
from pandas import DataFrame
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
try:
for dtype in set(result.dtypes):
cols = result.select_dtypes([dtype]).columns
margin_dummy[cols] = margin_dummy[cols].astype(dtype)
result = result.append(margin_dummy)
except TypeError:
# we cannot reshape, so coerce the axis
result.index = result.index._to_safe_for_reshape()
result = result.append(margin_dummy)
result.index.names = row_names
return result
def _compute_grand_margin(data, values, aggfunc,
margins_name='All'):
if values:
grand_margin = {}
for k, v in data[values].iteritems():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], str):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {margins_name: aggfunc(data.index)}
def _generate_marginal_results(table, data, values, rows, cols, aggfunc,
observed,
grand_margin,
margins_name='All'):
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
margin_keys = []
def _all_key(key):
return (key, margins_name) + ('',) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows + values].groupby(
rows, observed=observed).agg(aggfunc)
cat_axis = 1
for key, piece in table.groupby(level=0,
axis=cat_axis,
observed=observed):
all_key = _all_key(key)
# we are going to mutate this, so need to copy!
piece = piece.copy()
try:
piece[all_key] = margin[key]
except TypeError:
# we cannot reshape, so coerce the axis
piece.set_axis(piece._get_axis(
cat_axis)._to_safe_for_reshape(),
axis=cat_axis, inplace=True)
piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
else:
margin = grand_margin
cat_axis = 0
for key, piece in table.groupby(level=0,
axis=cat_axis,
observed=observed):
all_key = _all_key(key)
table_pieces.append(piece)
table_pieces.append(Series(margin[key], index=[all_key]))
margin_keys.append(all_key)
result = concat(table_pieces, axis=cat_axis)
if len(rows) == 0:
return result
else:
result = table
margin_keys = table.columns
if len(cols) > 0:
row_margin = data[cols + values].groupby(
cols, observed=observed).agg(aggfunc)
row_margin = row_margin.stack()
# slight hack
new_order = [len(cols)] + list(range(len(cols)))
row_margin.index = row_margin.index.reorder_levels(new_order)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc,
observed, margins_name='All'):
if len(cols) > 0:
# need to "interleave" the margins
margin_keys = []
def _all_key():
if len(cols) == 1:
return margins_name
return (margins_name, ) + ('', ) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows].groupby(rows,
observed=observed).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
else:
margin = data.groupby(level=0,
axis=0,
observed=observed).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
return result
else:
result = table
margin_keys = table.columns
if len(cols):
row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
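# Illustrative sketch (not part of pandas itself): the two marginal-result
# helpers above back the ``margins``/``margins_name`` options, which append
# an "All" row and column of subtotals to the pivoted output:
def _example_pivot_table_margins():
    import pandas as pd

    df = pd.DataFrame({
        "city": ["NY", "NY", "SF", "SF"],
        "year": [2017, 2018, 2017, 2018],
        "sales": [10, 20, 30, 40],
    })
    return pd.pivot_table(df, index="city", columns="year", values="sales",
                          aggfunc="sum", margins=True, margins_name="All")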
def _convert_by(by):
if by is None:
by = []
elif (is_scalar(by) or
isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) or
hasattr(by, '__call__')):
by = [by]
else:
by = list(by)
return by
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot'], indents=1)
def pivot(data, index=None, columns=None, values=None):
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = data.set_index(cols, append=append)
else:
if index is None:
index = data.index
else:
index = data[index]
index = MultiIndex.from_arrays([index, data[columns]])
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
indexed = data._constructor(data[values].values, index=index,
columns=values)
else:
indexed = data._constructor_sliced(data[values].values,
index=index)
return indexed.unstack(columns)
def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, margins_name='All', dropna=True,
normalize=False):
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
.. versionadded:: 0.18.1
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
rownames = _get_names(index, rownames, prefix='row')
colnames = _get_names(columns, colnames, prefix='col')
common_idx = _get_objs_combined_axis(index + columns, intersect=True,
sort=False)
data = {}
data.update(zip(rownames, index))
data.update(zip(colnames, columns))
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
from pandas import DataFrame
df = DataFrame(data, index=common_idx)
if values is None:
df['__dummy__'] = 0
kwargs = {'aggfunc': len, 'fill_value': 0}
else:
df['__dummy__'] = values
kwargs = {'aggfunc': aggfunc}
table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
margins=margins, margins_name=margins_name,
dropna=dropna, **kwargs)
# Post-process
if normalize is not False:
table = _normalize(table, normalize=normalize, margins=margins,
margins_name=margins_name)
return table
def _normalize(table, normalize, margins, margins_name='All'):
if not isinstance(normalize, (bool, str)):
axis_subs = {0: 'index', 1: 'columns'}
try:
normalize = axis_subs[normalize]
except KeyError:
raise ValueError("Not a valid normalize argument")
if margins is False:
# Actual Normalizations
normalizers = {
'all': lambda x: x / x.sum(axis=1).sum(axis=0),
'columns': lambda x: x / x.sum(),
'index': lambda x: x.div(x.sum(axis=1), axis=0)
}
normalizers[True] = normalizers['all']
try:
f = normalizers[normalize]
except KeyError:
raise ValueError("Not a valid normalize argument")
table = f(table)
table = table.fillna(0)
elif margins is True:
column_margin = table.loc[:, margins_name].drop(margins_name)
index_margin = table.loc[margins_name, :].drop(margins_name)
table = table.drop(margins_name, axis=1).drop(margins_name)
# to keep index and columns names
table_index_names = table.index.names
table_columns_names = table.columns.names
# Normalize core
table = _normalize(table, normalize=normalize, margins=False)
# Fix Margins
if normalize == 'columns':
column_margin = column_margin / column_margin.sum()
table = concat([table, column_margin], axis=1)
table = table.fillna(0)
elif normalize == 'index':
index_margin = index_margin / index_margin.sum()
table = table.append(index_margin)
table = table.fillna(0)
elif normalize == "all" or normalize is True:
column_margin = column_margin / column_margin.sum()
index_margin = index_margin / index_margin.sum()
index_margin.loc[margins_name] = 1
table = concat([table, column_margin], axis=1)
table = table.append(index_margin)
table = table.fillna(0)
else:
raise ValueError("Not a valid normalize argument")
table.index.names = table_index_names
table.columns.names = table_columns_names
else:
raise ValueError("Not a valid margins argument")
return table
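# Illustrative sketch (not part of pandas itself): ``_normalize`` implements
# the ``normalize`` argument of ``crosstab``. For instance, with
# ``normalize='index'`` every row of the frequency table is divided by its
# own total, so each row sums to 1.0:
def _example_crosstab_normalize():
    import pandas as pd

    a = pd.Series(["x", "x", "y", "y", "y"])
    b = pd.Series(["p", "q", "p", "p", "q"])
    return pd.crosstab(a, b, normalize="index")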
def _get_names(arrs, names, prefix='row'):
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
names.append('{prefix}_{i}'.format(prefix=prefix, i=i))
else:
if len(names) != len(arrs):
raise AssertionError('arrays and names must have the same length')
if not isinstance(names, list):
names = list(names)
return names
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 Futur Solo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from futurefinity.utils import (MagicDict, TolerantMagicDict,
FutureFinityError, ensure_str, ensure_bytes)
from futurefinity import security
from collections import namedtuple
from http.cookies import SimpleCookie as HTTPCookies
from http.client import responses as status_code_text
from typing import Union, Optional, Any, List, Mapping, Tuple
import futurefinity
import sys
import json
import string
import traceback
import urllib.parse
_CRLF_MARK = "\r\n"
_CRLF_BYTES_MARK = b"\r\n"
_MAX_INITIAL_LENGTH = 8 * 1024 # 8K
_MAX_BODY_LENGTH = 52428800 # 50M
_CONN_INIT = object()
_CONN_INITIAL_WAITING = object()
_CONN_INITIAL_PARSED = object()
_CONN_STREAMED = object()
_CONN_BODY_WAITING = object()
_CONN_MESSAGE_PARSED = object()
_CONN_INITIAL_WRITTEN = object()
_CONN_BODY_WRITTEN = object()
_CONN_CLOSED = object()
class ProtocolError(FutureFinityError):
"""
FutureFinity Protocol Error.
All Errors from the Protocol are based on this class.
"""
pass
class CapitalizedHTTPv1Headers(dict):
"""
    Convert a string to HTTP header style capitalization.
    .. code-block:: python3
        >>> capitalize_header = CapitalizedHTTPv1Headers()
>>> capitalize_header["set-cookie"]
'Set-Cookie'
>>> capitalize_header["SET-COOKIE"]
'Set-Cookie'
>>> capitalize_header["sET-CooKIe"]
'Set-Cookie'
>>> capitalize_header["MY-cUsToM-heAdER"]
'My-Custom-Header'
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.update({
"te": "TE",
"age": "Age",
"date": "Date",
"etag": "ETag",
"from": "From",
"host": "Host",
"vary": "Vary",
"allow": "Allow",
"range": "Range",
"accept": "Accept",
"cookie": "Cookie",
"expect": "Expect",
"server": "Server",
"referer": "Referer",
"if-match": "If-Match",
"if-range": "If-Range",
"location": "Location",
"connection": "Connection",
"keep-alive": "Keep-Alive",
"set-cookie": "Set-Cookie",
"user-agent": "User-Agent",
"content-md5": "Content-MD5",
"retry-after": "Retry-After",
"content-type": "Content-Type",
"max-forwards": "Max-Forwards",
"accept-ranges": "Accept-Ranges",
"authorization": "Authorization",
"content-range": "Content-Range",
"if-none-match": "If-None-Match",
"last-modified": "Last-Modified",
"accept-charset": "Accept-Charset",
"content-length": "Content-Length",
"accept-encoding": "Accept-Encoding",
"accept-language": "Accept-Language",
"content-encoding": "Content-Encoding",
"www-authenticate": "WWW-Authenticate",
"if-modified-since": "If-Modified-Since",
"proxy-authenticate": "Proxy-Authenticate",
"if-unmodified-since": "If-Unmodified-Since",
"proxy-authorization": "Proxy-Authorization",
})
def __getitem__(self, key: str) -> str:
if key not in self:
self[key] = key.title()
return dict.__getitem__(self, key)
_capitalize_header = CapitalizedHTTPv1Headers()
class HTTPHeaders(TolerantMagicDict):
"""
HTTPHeaders class, based on TolerantMagicDict.
    It has all the features of TolerantMagicDict, and it can also
    parse and assemble HTTP headers.
"""
def __str__(self) -> str:
content_list = [(key, value) for (key, value) in self.items()]
return "HTTPHeaders(%s)" % str(content_list)
def copy(self) -> "HTTPHeaders":
return HTTPHeaders(self)
@staticmethod
def parse(data: Union[str, bytes, list,
TolerantMagicDict]) -> "HTTPHeaders":
headers = HTTPHeaders()
headers.load_headers(data)
return headers
def assemble(self) -> bytes:
"""
        Assemble an HTTPHeaders instance into HTTP/1.x form.
"""
headers_str = ""
for (name, value) in self.items():
headers_str += "%s: %s" % (_capitalize_header[name], value)
headers_str += _CRLF_MARK
return ensure_bytes(headers_str)
def load_headers(self, data: Union[str, bytes, list, TolerantMagicDict]):
"""
Load HTTP Headers from another object.
It will raise an Error if the header is invalid.
"""
# For dict-like object.
if hasattr(data, "items"):
for (key, value) in data.items():
self.add(key.strip(), value.strip())
return
if isinstance(data, (str, bytes)):
# For string-like object.
splitted_data = ensure_str(data).split(_CRLF_MARK)
for header in splitted_data:
if not header:
continue
(key, value) = header.split(":", 1)
self.add(key.strip(), value.strip())
return
# For list-like object.
if hasattr(data, "__iter__"):
for (key, value) in data:
self.add(key.strip(), value.strip())
return
raise ValueError("Unknown Type of input data.")
def accept_cookies_for_request(self, cookies: HTTPCookies):
"""
Insert all the cookies as a request cookie header.
"""
cookie_string = ""
if "cookie" in self.keys():
cookie_string += self["cookie"]
for cookie_name, cookie_morsel in cookies.items():
cookie_string += "%(cookie_name)s=%(cookie_value)s; " % {
"cookie_name": cookie_name,
"cookie_value": cookie_morsel.value
}
if cookie_string:
self["cookie"] = cookie_string
def accept_cookies_for_response(self, cookies: HTTPCookies):
"""
Insert all the cookies as response set cookie headers.
"""
for cookie_morsel in cookies.values():
self.add("set-cookie", cookie_morsel.OutputString())
__copy__ = copy
__repr__ = __str__
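# Illustrative usage sketch (not part of the library API surface): an
# ``HTTPHeaders`` instance can be round-tripped between the parsed,
# case-tolerant representation and the HTTP/1.x wire form.
def _example_http_headers_roundtrip() -> bytes:
    raw = "Host: example.com\r\nContent-Type: text/plain\r\n"
    headers = HTTPHeaders.parse(raw)
    assert headers.get_first("host") == "example.com"
    # ``assemble`` re-capitalizes the names via ``_capitalize_header``.
    return headers.assemble()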
class HTTPMultipartFileField:
"""
    Contains a file as an HTTP form field.
"""
def __init__(self, fieldname: str, filename: str,
content: bytes,
content_type: str="application/octet-stream",
headers: Optional[HTTPHeaders]=None,
encoding: str="binary"):
self.fieldname = fieldname
self.filename = filename
self.content = content
self.content_type = content_type
self.headers = headers or HTTPHeaders()
self.encoding = encoding
def __str__(self) -> str:
return ("HTTPMultipartFileField(filename=%(filename)s, "
"content_type=%(content_type)s, "
"headers=%(headers)s, "
"encoding=%(encoding)s)") % {
"filename": repr(self.filename),
"content_type": repr(self.content_type),
"headers": repr(self.headers),
"encoding": repr(self.encoding)
}
def assemble(self) -> bytes:
"""
Convert this form field to bytes.
"""
self.headers["content-type"] = self.content_type
self.headers["content-transfer-encoding"] = self.encoding
content_disposition = "form-data; "
content_disposition += "name=\"%s\"; " % self.fieldname
content_disposition += "filename=\"%s\"" % self.filename
self.headers["content-disposition"] = content_disposition
field = self.headers.assemble()
field += _CRLF_BYTES_MARK
field += ensure_bytes(self.content)
field += _CRLF_BYTES_MARK
return field
def copy(self) -> "HTTPMultipartFileField":
raise ProtocolError("HTTPMultipartFileField is not copyable.")
__copy__ = copy
class HTTPMultipartBody(TolerantMagicDict):
"""
HTTPBody class, based on TolerantMagicDict.
    It has all the features of TolerantMagicDict, and it can also
    parse and assemble HTTP multipart bodies.
"""
def __init__(self, *args, **kwargs):
self.files = TolerantMagicDict()
TolerantMagicDict.__init__(self, *args, **kwargs)
@staticmethod
def parse(content_type: str, data: bytes) -> "HTTPMultipartBody":
"""
Parse HTTP v1 Multipart Body.
        It will raise an error if parsing fails.
"""
body_args = HTTPMultipartBody()
if not content_type.lower().startswith("multipart/form-data"):
raise ProtocolError("Unknown content-type.")
for field in content_type.split(";"): # Search Boundary
if field.find("boundary=") == -1:
continue
boundary = ensure_bytes(field.split("=")[1])
if boundary.startswith(b'"') and boundary.endswith(b'"'):
boundary = boundary[1:-1]
break
else:
raise ProtocolError("Cannot Find Boundary.")
full_boundary = b"--" + boundary
body_content = data.split(full_boundary + b"--")[0]
full_boundary += _CRLF_BYTES_MARK
splitted_body_content = body_content.split(full_boundary)
for part in splitted_body_content:
if not part:
continue
            initial, content = part.split(_CRLF_BYTES_MARK * 2, 1)
headers = HTTPHeaders.parse(initial)
disposition = headers.get_first("content-disposition")
disposition_list = []
disposition_dict = TolerantMagicDict()
for field in disposition.split(";"): # Split Disposition
field = field.strip() # Remove Useless Spaces.
if field.find("=") == -1: # This is not a key-value pair.
disposition_list.append(field)
continue
                key, value = field.split("=", 1)
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
disposition_dict.add(key.strip().lower(), value.strip())
if disposition_list[0] != "form-data":
raise ProtocolError("Cannot Parse Body.")
# Mixed form-data will be supported later.
content = content[:-2] # Drop CRLF Mark
if "filename" in disposition_dict.keys():
body_args.files.add(
disposition_dict.get_first("name", ""),
HTTPMultipartFileField(
fieldname=disposition_dict.get_first("name", ""),
filename=disposition_dict.get_first("filename", ""),
content=content,
content_type=headers.get_first(
"content-type", "application/octet-stream"),
headers=headers,
encoding=headers.get_first("content-transfer-encoding",
"binary")))
else:
try:
content = content.decode()
except UnicodeDecodeError:
pass
body_args.add(disposition_dict.get_first("name", ""), content)
return body_args
def assemble(self) -> Tuple[bytes, str]:
"""
Generate HTTP v1 Body to bytes.
It will return the body in bytes and the content-type in str.
"""
body = b""
boundary = "----------FutureFinityFormBoundary"
boundary += ensure_str(security.get_random_str(8)).lower()
content_type = "multipart/form-data; boundary=" + boundary
full_boundary = b"--" + ensure_bytes(boundary)
for field_name, field_value in self.items():
body += full_boundary + _CRLF_BYTES_MARK
if isinstance(field_value, str):
body += b"Content-Disposition: form-data; "
body += ensure_bytes("name=\"%s\"\r\n" % field_name)
body += _CRLF_BYTES_MARK
body += ensure_bytes(field_value)
body += _CRLF_BYTES_MARK
else:
raise ProtocolError("Unknown Field Type")
for file_field in self.files.values():
body += full_boundary + _CRLF_BYTES_MARK
body += file_field.assemble()
body += full_boundary + b"--" + _CRLF_BYTES_MARK
return body, content_type
def __str__(self) -> str:
# Multipart Body is not printable.
return object.__str__(self)
def __repr__(self) -> str:
# Multipart Body is not printable.
return object.__repr__(self)
def copy(self) -> "HTTPMultipartBody":
raise ProtocolError("HTTPMultipartBody is not copyable.")
__copy__ = copy
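# Illustrative usage sketch (not part of the library API surface): building a
# multipart body from a plain field and a file field, as a client might do
# before sending a request. The field names used here are made up.
def _example_multipart_assemble() -> Tuple[bytes, str]:
    body_args = HTTPMultipartBody()
    body_args.add("username", "alice")
    body_args.files.add("avatar", HTTPMultipartFileField(
        fieldname="avatar", filename="avatar.png",
        content=b"<png bytes>", content_type="image/png"))
    # ``assemble`` returns the encoded body together with the matching
    # content-type header value (including the generated boundary).
    return body_args.assemble()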
class HTTPIncomingMessage:
"""
FutureFinity HTTP Incoming Message Class.
This is the base class of `HTTPIncomingRequest` and `HTTPIncomingResponse`.
"""
@property
def _is_chunked_body(self) -> bool:
"""
Return `True` if there is a chunked body in the message.
"""
if not hasattr(self, "__is_chunked_body"):
if self.http_version == 10:
self.__is_chunked_body = False
else:
transfer_encoding = self.headers.get_first("transfer-encoding")
if not transfer_encoding:
self.__is_chunked_body = False
elif transfer_encoding.lower() == "chunked":
self.__is_chunked_body = True
else:
self.__is_chunked_body = False
return self.__is_chunked_body
@property
def scheme(self) -> str:
"""
Return the scheme that the connection used.
"""
if not hasattr(self, "_scheme"):
if self.connection.use_tls:
self._scheme = "https"
else:
self._scheme = "http"
return self._scheme
@property
def _expected_content_length(self) -> int:
"""
Return the expected content length of the message.
"""
if not hasattr(self, "__expected_content_length"):
content_length = self.headers.get_first("content-length")
if not content_length:
# No content length header found.
self.__expected_content_length = -1
elif not content_length.isdecimal():
# Cannot convert content length to integer.
self.__expected_content_length = -1
else:
self.__expected_content_length = int(content_length)
return self.__expected_content_length
@property
def _body_expected(self) -> bool:
"""
Return True if the body is expected.
"""
if hasattr(self, "method"):
if self.method.lower() == "head":
return False
if self._is_chunked_body:
return True
if isinstance(self, HTTPIncomingResponse):
if self.headers.get_first("connection", "").lower() == "close":
return True
if self._expected_content_length != -1:
return True
return False
class HTTPIncomingRequest(HTTPIncomingMessage):
"""
FutureFinity HTTP Incoming Request Class.
    This is a subclass of `HTTPIncomingMessage`.
    This class represents an incoming HTTP request.
"""
def __init__(self, method: str,
origin_path: str,
headers: HTTPHeaders,
connection: "HTTPv1Connection",
http_version: int=10,
body: Optional[bytes]=None):
self.http_version = http_version
self.method = method
self.origin_path = origin_path
self.headers = headers
self.body = body
self.connection = connection
def _parse_origin_path(self):
parsed_url = urllib.parse.urlparse(self.origin_path)
self._path = parsed_url.path
link_args = TolerantMagicDict()
for (query_name, query_value) in urllib.parse.parse_qsl(
parsed_url.query):
link_args.add(query_name, query_value)
self._link_args = link_args
@property
def cookies(self) -> HTTPCookies:
"""
Parse cookies and return cookies in a `HTTPCookies` instance.
"""
if not hasattr(self, "_cookies"):
cookies = HTTPCookies()
if "cookie" in self.headers:
for cookie_header in self.headers.get_list("cookie"):
cookies.load(cookie_header)
self._cookies = cookies
return self._cookies
@property
def path(self) -> str:
"""
Parse path and return the path in `str`.
"""
if not hasattr(self, "_path"):
self._parse_origin_path()
return self._path
@property
def host(self) -> str:
"""
Parse host and return the host in `str`.
"""
if not hasattr(self, "_host"):
self._host = self.headers.get_first("host")
return self._host
@property
def link_args(self) -> TolerantMagicDict:
"""
Parse link arguments and return link arguments in a
`TolerantMagicDict` instance.
"""
if not hasattr(self, "_link_args"):
self._parse_origin_path()
return self._link_args
@property
def body_args(self) -> Union[TolerantMagicDict, HTTPMultipartBody,
Mapping[Any, Any],
List[Any]]:
"""
Parse body arguments and return body arguments in a
proper instance.
"""
if not hasattr(self, "_body_args"):
content_type = self.headers.get_first("content-type")
if content_type.lower().strip() in (
"application/x-www-form-urlencoded", "application/x-url-encoded"):
self._body_args = TolerantMagicDict(
urllib.parse.parse_qsl(
ensure_str(self.body),
keep_blank_values=True,
strict_parsing=True))
elif content_type.lower().startswith(
"multipart/form-data"):
self._body_args = HTTPMultipartBody.parse(
content_type=content_type,
data=self.body)
elif content_type.lower().strip() == "application/json":
self._body_args = json.loads(ensure_str(self.body))
else: # Unknown Content Type.
raise ProtocolError("Unknown Body Type.")
return self._body_args
def __str__(self) -> str:
return ("HTTPIncomingRequest("
"method=%(method)s, "
"path=%(path)s, "
"http_version=%(http_version)s, "
"host=%(host)s, "
"headers=%(headers)s, "
"cookies=%(cookies)s, "
"link_args=%(link_args)s, "
")") % {
"method": repr(self.method),
"path": repr(self.path),
"http_version": repr(self.http_version),
"host": repr(self.host),
"headers": repr(self.headers),
"cookies": repr(self.cookies),
"link_args": repr(self.link_args)
}
__repr__ = __str__
class HTTPIncomingResponse(HTTPIncomingMessage):
"""
FutureFinity HTTP Incoming Response Class.
    This is a subclass of `HTTPIncomingMessage`.
    This class represents an incoming HTTP response.
"""
def __init__(self, status_code: int, http_version: int=10,
headers: HTTPHeaders=None,
body: Optional[bytes]=None,
connection: "HTTPv1Connection"=None):
self.http_version = http_version
self.status_code = status_code
self.headers = headers
self.body = body
self.connection = connection
@property
def cookies(self) -> HTTPCookies:
"""
Parse cookies and return cookies in a `HTTPCookies` instance.
"""
if not hasattr(self, "_cookies"):
cookies = HTTPCookies()
if "set-cookie" in self.headers:
for cookie_header in self.headers.get_list("set-cookie"):
cookies.load(cookie_header)
self._cookies = cookies
return self._cookies
def __str__(self) -> str:
return ("HTTPIncomingResponse("
"status_code=%(status_code)s, "
"http_version=%(http_version)s, "
"headers=%(headers)s, "
"cookies=%(cookies)s, "
")") % {
"status_code": repr(self.status_code),
"http_version": repr(self.http_version),
"headers": repr(self.headers),
"cookies": repr(self.cookies)
}
__repr__ = __str__
class BaseHTTPConnectionController:
"""
FutureFinity Base HTTP Connection Controller Class.
    This is the base controller for HTTP connections.
    Any connection controller should be based on this class.
"""
def __init__(self, *args, **kwargs):
self.transport = None
self.use_stream = False
def initial_received(self, incoming: HTTPIncomingMessage):
"""
Triggered when the initial of a message is received.
"""
pass
def stream_received(self, incoming: HTTPIncomingMessage, data: bytes):
"""
Triggered when the stream of a message is received.
This will only be triggered when the message is detected as
a stream message.
"""
raise NotImplementedError("You should override stream_received.")
def error_received(self, incoming, exc: tuple):
"""
        Triggered when an error occurred while parsing the message.
"""
raise NotImplementedError("You should override error_received.")
def message_received(self, incoming: HTTPIncomingMessage):
"""
Triggered when a message is completely received.
This will not be triggered when the message is detected as
a stream message.
"""
raise NotImplementedError("You should override message_received.")
def set_timeout_handler(self, suggested_time: Optional[int]=None):
"""
        Set an EventLoop.call_later instance that closes the transport after
        a timeout.
"""
pass
def cancel_timeout_handler(self):
"""
        Cancel the EventLoop.call_later instance to prevent the transport
        from being closed accidentally.
"""
pass
class ConnectionParseError(ProtocolError, ConnectionError):
"""
FutureFinity Connection Parse Error.
    All connection parse errors are based on this class.
"""
pass
class ConnectionBadMessage(ConnectionParseError):
"""
FutureFinity Connection Bad Message Error.
This Error is raised when the message is not a valid message.
"""
pass
class ConnectionEntityTooLarge(ConnectionParseError):
"""
FutureFinity Connection Entity Too Large Error.
    This error is raised when the message is too large for FutureFinity to
    handle.
"""
pass
class HTTPv1Connection:
"""
FutureFinity HTTP v1 Connection Class.
This class will control and parse the http v1 connection.
"""
def __init__(self, controller: BaseHTTPConnectionController,
is_client: bool, http_version: int=10,
use_tls: bool=False, sockname: Optional[Tuple[str, int]]=None,
peername: Optional[Tuple[str, int]]=None,
allow_keep_alive: bool=True):
self.http_version = http_version
self.is_client = is_client
self.use_tls = use_tls
self.sockname = sockname
self.peername = peername
self.controller = controller
self.allow_keep_alive = allow_keep_alive
self.max_initial_length = _MAX_INITIAL_LENGTH
self.max_body_length = _MAX_BODY_LENGTH
self._pending_bytes = bytearray()
self._reset_connection()
def _reset_connection(self): # Reset Connection For Keep-Alive.
self.controller.set_timeout_handler()
self._use_keep_alive = None
self._body_length = None
self._next_chunk_length = None
self._pending_body = b""
self._parsed_incoming_info = {}
self.incoming = None
self._outgoing_chunked_body = False
self.stage = _CONN_INIT
@property
def _can_keep_alive(self):
if self.allow_keep_alive is False:
return False
if self.http_version == 10:
return False
if self._use_keep_alive is not None:
return self._use_keep_alive
return True
def _parse_initial(self):
initial_end = self._pending_bytes.find(_CRLF_BYTES_MARK * 2)
if initial_end == -1:
if len(self._pending_bytes) > self.max_initial_length:
raise ConnectionEntityTooLarge(
"Initial Exceed its Maximum Length.")
return
initial_end += 2
if initial_end > self.max_initial_length:
raise ConnectionEntityTooLarge(
"Initial Exceed its Maximum Length.")
pending_initial = ensure_bytes(self._pending_bytes[:initial_end])
del self._pending_bytes[:initial_end + 2]
basic_info, origin_headers = ensure_str(pending_initial).split(
_CRLF_MARK, 1)
basic_info = basic_info.split(" ")
if self.is_client:
http_version = basic_info[0]
if not basic_info[1].isdecimal():
raise ConnectionBadMessage("Bad Initial Received.")
self._parsed_incoming_info["status_code"] = int(basic_info[1])
else:
if len(basic_info) != 3:
raise ConnectionBadMessage("Bad Initial Received.")
method, origin_path, http_version = basic_info
self._parsed_incoming_info["method"] = basic_info[0]
self._parsed_incoming_info["origin_path"] = basic_info[1]
if http_version.lower() == "http/1.1":
self.http_version = 11
elif http_version.lower() == "http/1.0":
self.http_version = 10
else:
raise ConnectionBadMessage("Unknown HTTP Version.")
self._parsed_incoming_info["http_version"] = self.http_version
try:
headers = HTTPHeaders.parse(origin_headers)
except:
raise ConnectionBadMessage("Bad Headers Received.")
if self._can_keep_alive and "connection" in headers:
self._use_keep_alive = headers.get_first(
"connection").lower() == "keep-alive"
self._parsed_incoming_info["headers"] = headers
if self.is_client:
try:
self.incoming = HTTPIncomingResponse(
**self._parsed_incoming_info)
except:
raise ConnectionBadMessage("Bad Initial Received.")
else:
try:
self.incoming = HTTPIncomingRequest(
**self._parsed_incoming_info, connection=self)
except:
raise ConnectionBadMessage("Bad Initial Received.")
self.stage = _CONN_INITIAL_PARSED
def _parse_next_chunk(self):
if self._body_length is None:
self._body_length = 0
while True:
if self._next_chunk_length is None:
length_end = self._pending_bytes.find(_CRLF_BYTES_MARK)
if length_end == -1:
if len(self._pending_bytes) > 10:
# FFFFFFFF\r\n is about 4GB, FutureFinity can only
# handle files less than 50MB by default.
raise ConnectionEntityTooLarge(
"The body is too large.")
return
length_bytes = self._pending_bytes[:length_end]
del self._pending_bytes[:length_end + 2]
try:
self._next_chunk_length = int(length_bytes, 16)
except ValueError: # Not Valid Hexadecimal bytes
raise ConnectionBadMessage(
"Bad Chunk Length Received.")
if self._next_chunk_length > self.max_body_length:
raise ConnectionEntityTooLarge(
"The body is too large.")
if len(self._pending_bytes) < self._next_chunk_length + 2:
return # Data not enough.
if self._next_chunk_length == 0:
del self._pending_bytes[:2]
self.incoming.body = self._pending_body
self.stage = _CONN_MESSAGE_PARSED
return # Parse Finished.
self._pending_body += self._pending_bytes[:self._next_chunk_length]
del self._pending_bytes[:self._next_chunk_length + 2]
self._body_length += self._next_chunk_length
self._next_chunk_length = None
if self._body_length > self.max_body_length:
raise ConnectionEntityTooLarge(
"The body is too large.")
def _parse_body(self):
if self.incoming._is_chunked_body:
self._parse_next_chunk()
return
if self._body_length is None:
self._body_length = self.incoming._expected_content_length
if self.is_client is True:
if self._body_length == -1:
return # Waiting For Connection Close.
if self._body_length > self.max_body_length:
raise ConnectionEntityTooLarge("The body is too large.")
if len(self._pending_bytes) < self._body_length:
return # Data not enough, waiting.
self._pending_body = ensure_bytes(
self._pending_bytes[:self._body_length])
del self._pending_bytes[:self._body_length]
self.incoming.body = self._pending_body
self.stage = _CONN_MESSAGE_PARSED
def data_received(self, data: bytes):
"""
Trigger this function when data is received from the remote.
"""
if not data:
return # Nothing received, nothing is going to happen.
self._pending_bytes += data
try:
self._parse_incoming_message()
except:
if self.is_client:
self._close_connection()
else:
self.stage = _CONN_MESSAGE_PARSED
self.controller.error_received(self.incoming, sys.exc_info())
def _parse_incoming_message(self):
self.controller.cancel_timeout_handler()
if self.is_client is False:
if self.stage is _CONN_INIT:
self.stage = _CONN_INITIAL_WAITING
if self.stage is _CONN_INITIAL_WAITING:
self._parse_initial()
if self.stage is _CONN_INITIAL_PARSED:
self.controller.initial_received(self.incoming)
if self.controller.use_stream:
self.stage = _CONN_STREAMED
elif not self.incoming._body_expected:
self.stage = _CONN_MESSAGE_PARSED
else:
self.stage = _CONN_BODY_WAITING
if not self.incoming._is_chunked_body:
if (self.incoming._expected_content_length == -1 and
not self.is_client):
raise ConnectionBadMessage(
"Method Request a body, "
"but we cannot find a way to detect body length.")
if self.stage is _CONN_STREAMED:
self.controller.stream_received(self.incoming,
ensure_bytes(self._pending_bytes))
self._pending_bytes.clear()
return
if self.stage is _CONN_BODY_WAITING:
self._parse_body()
if self.stage is _CONN_MESSAGE_PARSED:
self.controller.message_received(self.incoming)
if self.is_client:
if self._can_keep_alive:
self._reset_connection()
else:
self._close_connection()
return
def write_initial(
self, http_version: Optional[int]=None, method: str="GET",
path: str="/", status_code: int=200,
headers: Optional[HTTPHeaders]=None):
"""
Write the initial to remote.
"""
initial = b""
if http_version is not None:
self.http_version = http_version
if self.http_version == 11:
http_version_text = "HTTP/1.1"
elif self.http_version == 10:
http_version_text = "HTTP/1.0"
else:
raise ProtocolError("Unknown HTTP Version.")
basic_info_template = b"%s %s %s" + _CRLF_BYTES_MARK
if self.is_client:
if self.stage is not _CONN_INIT:
raise ProtocolError(
"Cannot write when connection stage is not _CONN_INIT.")
basic_info = basic_info_template % (
ensure_bytes(method), ensure_bytes(path),
ensure_bytes(http_version_text))
else:
if self.stage is not _CONN_MESSAGE_PARSED:
raise ProtocolError("Unacceptable Function Access.")
basic_info = basic_info_template % (
ensure_bytes(http_version_text), ensure_bytes(status_code),
ensure_bytes(status_code_text[status_code]))
initial += basic_info
if self._can_keep_alive and "connection" in headers:
self._use_keep_alive = headers.get_first(
"connection").lower() == "keep-alive"
transfer_encoding = headers.get_first("transfer-encoding")
if transfer_encoding is not None:
if transfer_encoding.lower() == "chunked":
self._outgoing_chunked_body = True
else:
self._outgoing_chunked_body = False
else:
self._outgoing_chunked_body = False
if "connection" not in headers.keys():
if self._can_keep_alive:
headers["connection"] = "Keep-Alive"
else:
headers["connection"] = "Close"
else:
self._use_keep_alive = headers[
"connection"].lower() == "keep-alive"
if self.is_client:
if "accept" not in headers.keys():
headers["accept"] = "*/*"
if "user-agent" not in headers.keys():
headers["user-agent"] = "futurefinity/" + futurefinity.version
else:
if "server" not in headers.keys():
headers["server"] = "futurefinity/" + futurefinity.version
if method.lower() == "head":
# For Head Request, there will not be a body.
self._outgoing_chunked_body = False
initial += headers.assemble()
initial += _CRLF_BYTES_MARK
self.controller.transport.write(initial)
self.stage = _CONN_INITIAL_WRITTEN
def write_body(self, body: bytes):
"""
Write the body to remote.
        This can be triggered multiple times, until `finish_writing`
        is triggered.
"""
if self.stage not in (_CONN_INITIAL_WRITTEN, _CONN_BODY_WRITTEN):
raise ProtocolError("Invalid Function Access.")
self.stage = _CONN_BODY_WRITTEN
if self._outgoing_chunked_body:
self._write_body_chunk(body)
return
self.controller.transport.write(body)
def _write_body_chunk(self, body_chunk: bytes):
if not self._outgoing_chunked_body:
raise ProtocolError("Invalid Function Access.")
        if not body_chunk:
            # Skip empty chunks: a zero-length chunk would terminate the
            # body prematurely. Finish body writing via
            # HTTPv1Connection.finish_writing instead.
            return
chunk_bytes = b""
body_chunk_length = len(body_chunk)
chunk_bytes += ensure_bytes(hex(body_chunk_length)[2:].upper())
chunk_bytes += _CRLF_BYTES_MARK
chunk_bytes += body_chunk
chunk_bytes += _CRLF_BYTES_MARK
self.controller.transport.write(chunk_bytes)
def finish_writing(self):
"""
Trigger this function when everything is written.
It will reset the connection or close it.
"""
if self._outgoing_chunked_body:
self.controller.transport.write(b"0" + _CRLF_BYTES_MARK * 2)
if self.is_client:
self.stage = _CONN_INITIAL_WAITING
else:
if self._can_keep_alive:
self._reset_connection()
else:
self._close_connection()
def connection_lost(self, exc: Optional[tuple]=None):
"""
Triggered when remote is closed.
"""
if self.stage is _CONN_CLOSED:
return # This connection has been closed.
if self.stage is _CONN_INIT:
self.stage = _CONN_CLOSED
return # This connection has nothing, so nothing to cleanup.
if self.is_client:
if self.stage is _CONN_BODY_WAITING:
self._pending_body = ensure_bytes(self._pending_bytes)
self._pending_bytes.clear()
self.incoming.body = self._pending_body
self.stage = _CONN_MESSAGE_PARSED
self._parse_incoming_message() # Trigger Message Received.
self._close_connection()
def _close_connection(self): # Close Connection.
self.controller.cancel_timeout_handler()
if self.controller.transport:
self.controller.transport.close()
self.stage = _CONN_CLOSED
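# Illustrative sketch (not the connection's own code path): the wire format
# produced by ``_write_body_chunk`` plus ``finish_writing`` for a chunked
# body is ``<hex length>\r\n<data>\r\n`` per chunk, terminated by a
# zero-length chunk.
def _example_chunked_framing(chunks: List[bytes]) -> bytes:
    framed = b""
    for chunk in chunks:
        if not chunk:
            continue  # an empty chunk would end the body prematurely
        framed += ensure_bytes(hex(len(chunk))[2:].upper()) + _CRLF_BYTES_MARK
        framed += ensure_bytes(chunk) + _CRLF_BYTES_MARK
    return framed + b"0" + _CRLF_BYTES_MARK * 2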
|
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import logbook
import numpy as np
from collections import namedtuple
from math import isnan
from zipline.finance.performance.position import Position
from zipline.finance.transaction import Transaction
try:
# optional cython based OrderedDict
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
from six import iteritems, itervalues
import zipline.protocol as zp
from zipline.assets import (
Equity, Future
)
from zipline.errors import PositionTrackerMissingAssetFinder
from . position import positiondict
log = logbook.Logger('Performance')
PositionStats = namedtuple('PositionStats',
['net_exposure',
'gross_value',
'gross_exposure',
'short_value',
'short_exposure',
'shorts_count',
'long_value',
'long_exposure',
'longs_count',
'net_value'])
def calc_position_values(amounts,
last_sale_prices,
value_multipliers):
iter_amount_price_multiplier = zip(
amounts,
last_sale_prices,
itervalues(value_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
def calc_net(values):
# Returns 0.0 if there are no values.
return sum(values, np.float64())
def calc_position_exposures(amounts,
last_sale_prices,
exposure_multipliers):
iter_amount_price_multiplier = zip(
amounts,
last_sale_prices,
itervalues(exposure_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
def calc_long_value(position_values):
return sum(i for i in position_values if i > 0)
def calc_short_value(position_values):
return sum(i for i in position_values if i < 0)
def calc_long_exposure(position_exposures):
return sum(i for i in position_exposures if i > 0)
def calc_short_exposure(position_exposures):
return sum(i for i in position_exposures if i < 0)
def calc_longs_count(position_exposures):
return sum(1 for i in position_exposures if i > 0)
def calc_shorts_count(position_exposures):
return sum(1 for i in position_exposures if i < 0)
def calc_gross_exposure(long_exposure, short_exposure):
return long_exposure + abs(short_exposure)
def calc_gross_value(long_value, short_value):
return long_value + abs(short_value)
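# Illustrative sketch (not part of zipline itself): the helpers above split
# signed position values into long/short components and combine them into
# gross/net figures. For example, for values of +100, -40 and +25:
def _example_position_value_calcs():
    position_values = [100.0, -40.0, 25.0]
    long_value = calc_long_value(position_values)             # 125.0
    short_value = calc_short_value(position_values)           # -40.0
    gross_value = calc_gross_value(long_value, short_value)   # 165.0
    net_value = calc_net(position_values)                     # 85.0
    return long_value, short_value, gross_value, net_value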
class PositionTracker(object):
def __init__(self, asset_finder, data_frequency):
self.asset_finder = asset_finder
# sid => position object
self.positions = positiondict()
# Arrays for quick calculations of positions value
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._unpaid_dividends = {}
self._unpaid_stock_dividends = {}
self._positions_store = zp.Positions()
self.data_frequency = data_frequency
def _update_asset(self, sid):
try:
self._position_value_multipliers[sid]
self._position_exposure_multipliers[sid]
except KeyError:
# Check if there is an AssetFinder
if self.asset_finder is None:
raise PositionTrackerMissingAssetFinder()
# Collect the value multipliers from applicable sids
asset = self.asset_finder.retrieve_asset(sid)
if isinstance(asset, Equity):
self._position_value_multipliers[sid] = 1
self._position_exposure_multipliers[sid] = 1
if isinstance(asset, Future):
self._position_value_multipliers[sid] = 0
self._position_exposure_multipliers[sid] = asset.multiplier
def update_positions(self, positions):
# update positions in batch
self.positions.update(positions)
for sid, pos in iteritems(positions):
self._update_asset(sid)
def update_position(self, sid, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
if sid not in self.positions:
position = Position(sid)
self.positions[sid] = position
else:
position = self.positions[sid]
if amount is not None:
position.amount = amount
self._update_asset(sid=sid)
if last_sale_price is not None:
position.last_sale_price = last_sale_price
if last_sale_date is not None:
position.last_sale_date = last_sale_date
if cost_basis is not None:
position.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
sid = txn.sid
if sid not in self.positions:
position = Position(sid)
self.positions[sid] = position
else:
position = self.positions[sid]
position.update(txn)
if position.amount == 0:
# if this position now has 0 shares, remove it from our internal
# bookkeeping.
del self.positions[sid]
try:
# if this position exists in our user-facing dictionary,
# remove it as well.
del self._positions_store[sid]
except KeyError:
pass
self._update_asset(sid)
def handle_commission(self, sid, cost):
# Adjust the cost basis of the stock if we own it
if sid in self.positions:
self.positions[sid].adjust_commission_cost_basis(sid, cost)
def handle_splits(self, splits):
"""
Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (sid, ratio).
Returns
-------
        int: The leftover cash from fractional shares after modifying each
position.
"""
total_leftover_cash = 0
for split in splits:
sid = split[0]
if sid in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[sid]
leftover_cash = position.handle_split(sid, split[1])
self._update_asset(split[0])
total_leftover_cash += leftover_cash
return total_leftover_cash
def earn_dividends(self, dividends, stock_dividends):
"""
Given a list of dividends whose ex_dates are all the next trading day,
calculate and store the cash and/or stock payments to be paid on each
dividend's pay date.
Parameters
----------
dividends: iterable of (asset, amount, pay_date) namedtuples
stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)
namedtuples.
"""
for dividend in dividends:
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
div_owed = self.positions[dividend.asset].earn_dividend(dividend)
try:
self._unpaid_dividends[dividend.pay_date].append(div_owed)
except KeyError:
self._unpaid_dividends[dividend.pay_date] = [div_owed]
for stock_dividend in stock_dividends:
div_owed = \
self.positions[stock_dividend.asset].earn_stock_dividend(
stock_dividend)
try:
self._unpaid_stock_dividends[stock_dividend.pay_date].\
append(div_owed)
except KeyError:
self._unpaid_stock_dividends[stock_dividend.pay_date] = \
[div_owed]
def pay_dividends(self, next_trading_day):
"""
Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends.
"""
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
# Mark these dividends as paid by dropping them from our unpaid
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
        # Note that the payment amounts may be negative for short positions,
        # representing the fact that we're required to reimburse the owner of
        # the stock for any dividends paid while borrowing.
for payment in payments:
net_cash_payment += payment['amount']
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_stock_dividends[next_trading_day]
except:
stock_payments = []
for stock_payment in stock_payments:
payment_asset = stock_payment['payment_asset']
share_count = stock_payment['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
if payment_asset in self.positions:
position = self.positions[payment_asset]
else:
position = self.positions[payment_asset] = \
Position(payment_asset)
position.amount += share_count
self._update_asset(payment_asset)
return net_cash_payment
def maybe_create_close_position_transaction(self, asset, dt, data_portal):
if not self.positions.get(asset):
return None
amount = self.positions.get(asset).amount
price = data_portal.get_spot_value(
asset, 'price', dt, self.data_frequency)
# Get the last traded price if price is no longer available
if isnan(price):
price = self.positions.get(asset).last_sale_price
txn = Transaction(
sid=asset,
amount=(-1 * amount),
dt=dt,
price=price,
commission=0,
order_id=None,
)
return txn
def get_positions(self):
positions = self._positions_store
for sid, pos in iteritems(self.positions):
if pos.amount == 0:
# Clear out the position if it has become empty since the last
# time get_positions was called. Catching the KeyError is
# faster than checking `if sid in positions`, and this can be
# potentially called in a tight inner loop.
try:
del positions[sid]
except KeyError:
pass
continue
position = zp.Position(sid)
position.amount = pos.amount
position.cost_basis = pos.cost_basis
position.last_sale_price = pos.last_sale_price
position.last_sale_date = pos.last_sale_date
# Adds the new position if we didn't have one before, or overwrite
# one we have currently
positions[sid] = position
return positions
def get_positions_list(self):
positions = []
for sid, pos in iteritems(self.positions):
if pos.amount != 0:
positions.append(pos.to_dict())
return positions
def sync_last_sale_prices(self, dt, handle_non_market_minutes,
data_portal):
if not handle_non_market_minutes:
for asset, position in iteritems(self.positions):
last_sale_price = data_portal.get_spot_value(
asset, 'price', dt, self.data_frequency
)
if not np.isnan(last_sale_price):
position.last_sale_price = last_sale_price
else:
for asset, position in iteritems(self.positions):
last_sale_price = data_portal.get_adjusted_value(
asset,
'price',
data_portal.trading_calendar.previous_minute(dt),
dt,
self.data_frequency
)
if not np.isnan(last_sale_price):
position.last_sale_price = last_sale_price
def stats(self):
amounts = []
last_sale_prices = []
for pos in itervalues(self.positions):
amounts.append(pos.amount)
last_sale_prices.append(pos.last_sale_price)
position_values = calc_position_values(
amounts,
last_sale_prices,
self._position_value_multipliers
)
position_exposures = calc_position_exposures(
amounts,
last_sale_prices,
self._position_exposure_multipliers
)
long_value = calc_long_value(position_values)
short_value = calc_short_value(position_values)
gross_value = calc_gross_value(long_value, short_value)
long_exposure = calc_long_exposure(position_exposures)
short_exposure = calc_short_exposure(position_exposures)
gross_exposure = calc_gross_exposure(long_exposure, short_exposure)
net_exposure = calc_net(position_exposures)
longs_count = calc_longs_count(position_exposures)
shorts_count = calc_shorts_count(position_exposures)
net_value = calc_net(position_values)
return PositionStats(
long_value=long_value,
gross_value=gross_value,
short_value=short_value,
long_exposure=long_exposure,
short_exposure=short_exposure,
gross_exposure=gross_exposure,
net_exposure=net_exposure,
longs_count=longs_count,
shorts_count=shorts_count,
net_value=net_value
)
|
|
# Copyright (c) 2022 Matt Colligan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING
from pywayland.protocol.wayland import WlKeyboard
from wlroots import ffi, lib
from wlroots.wlr_types import input_device
from xkbcommon import xkb
from libqtile import configurable
from libqtile.backend.wayland.wlrq import HasListeners, buttons
from libqtile.log_utils import logger
try:
from libqtile.backend.wayland._libinput import lib as libinput # type: ignore
except ImportError:
# We want to continue without erroring here, so that the docs can build without the
# hard dependency of wlroots.
libinput = None
if TYPE_CHECKING:
from typing import Any
from pywayland.server import Listener
from wlroots.wlr_types import InputDevice
from wlroots.wlr_types.keyboard import KeyboardKeyEvent
from libqtile.backend.wayland.core import Core
KEY_PRESSED = WlKeyboard.key_state.pressed
KEY_RELEASED = WlKeyboard.key_state.released
# Keep this around instead of creating it on every key
xkb_keysym = ffi.new("const xkb_keysym_t **")
class InputConfig(configurable.Configurable):
"""
This is used to configure input devices. An instance of this class represents one
set of settings that can be applied to an input device.
To use this, define a dictionary called ``wl_input_rules`` in your config. The keys
are used to match input devices, and the values are instances of this class with the
desired settings. For example:
.. code-block:: python
from libqtile.backend.wayland import InputConfig
wl_input_rules = {
"1267:12377:ELAN1300:00 04F3:3059 Touchpad": InputConfig(left_handed=True),
"*": InputConfig(left_handed=True, pointer_accel=True),
"type:keyboard": InputConfig(xkb_options="caps:swapescape"),
}
    When an input device is being configured, the most specific matching key in the
    dictionary is found and the corresponding settings are used to configure the device.
    Unique identifiers are chosen first, then ``"type:X"``, then ``"*"`` (a minimal
    sketch of this precedence is given after the class definition).
    Options default to ``None``, which leaves a device's default settings intact. For
    information on what each option does, see the documentation for libinput:
https://wayland.freedesktop.org/libinput/doc/latest/configuration.html. Note that
devices often only support a subset of settings.
This tries to mirror how Sway configures libinput devices. For more information
check out sway-input(5): https://man.archlinux.org/man/sway-input.5#LIBINPUT_CONFIGURATION
Keyboards, managed by `xkbcommon <https://github.com/xkbcommon/libxkbcommon>`_, are
configured with the options prefixed by ``kb_``. X11's helpful `XKB guide
<https://www.x.org/releases/X11R7.5/doc/input/XKB-Config.html>`_ may be useful for
figuring out the syntax for some of these settings.
"""
defaults = [
("accel_profile", None, "``'adaptive'`` or ``'flat'``"),
("click_method", None, "``'none'``, ``'button_areas'`` or ``'clickfinger'``"),
("drag", None, "``True`` or ``False``"),
("drag_lock", None, "``True`` or ``False``"),
("dwt", None, "True or False"),
("left_handed", None, "``True`` or ``False``"),
("middle_emulation", None, "``True`` or ``False``"),
("natural_scroll", None, "``True`` or ``False``"),
("pointer_accel", None, "A ``float`` between -1 and 1."),
("scroll_button", None, "``'disable'``, 'Button[1-3,8,9]' or a keycode"),
(
"scroll_method",
None,
"``'none'``, ``'two_finger'``, ``'edge'``, or ``'on_button_down'``",
),
("tap", None, "``True`` or ``False``"),
("tap_button_map", None, "``'lrm'`` or ``'lmr'``"),
("kb_layout", None, "Keyboard layout i.e. ``XKB_DEFAULT_LAYOUT``"),
("kb_options", None, "Keyboard options i.e. ``XKB_DEFAULT_OPTIONS``"),
("kb_variant", None, "Keyboard variant i.e. ``XKB_DEFAULT_VARIANT``"),
("kb_repeat_rate", 25, "Keyboard key repeats made per second"),
("kb_repeat_delay", 600, "Keyboard delay in milliseconds before repeating"),
]
def __init__(self, **config: dict[str, Any]) -> None:
configurable.Configurable.__init__(self, **config)
self.add_defaults(InputConfig.defaults)
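# Illustrative sketch (not qtile's actual lookup code): the matching
# precedence described in the ``InputConfig`` docstring above -- an exact
# device identifier wins over a ``"type:X"`` key, which wins over ``"*"``.
# The helper name and arguments are made up for the example.
def _example_match_input_rule(rules, identifier, device_type):
    if identifier in rules:
        return rules[identifier]
    if "type:" + device_type in rules:
        return rules["type:" + device_type]
    return rules.get("*")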
if libinput:
ACCEL_PROFILES = {
"adaptive": libinput.LIBINPUT_CONFIG_ACCEL_PROFILE_ADAPTIVE,
"flat": libinput.LIBINPUT_CONFIG_ACCEL_PROFILE_FLAT,
}
CLICK_METHODS = {
"none": libinput.LIBINPUT_CONFIG_CLICK_METHOD_NONE,
"button_areas": libinput.LIBINPUT_CONFIG_CLICK_METHOD_BUTTON_AREAS,
"clickfinger": libinput.LIBINPUT_CONFIG_CLICK_METHOD_CLICKFINGER,
}
TAP_MAPS = {
"lrm": libinput.LIBINPUT_CONFIG_TAP_MAP_LRM,
"lmr": libinput.LIBINPUT_CONFIG_TAP_MAP_LMR,
}
SCROLL_METHODS = {
"none": libinput.LIBINPUT_CONFIG_SCROLL_NO_SCROLL,
"two_finger": libinput.LIBINPUT_CONFIG_SCROLL_2FG,
"edge": libinput.LIBINPUT_CONFIG_SCROLL_EDGE,
"on_button_down": libinput.LIBINPUT_CONFIG_SCROLL_ON_BUTTON_DOWN,
}
class Keyboard(HasListeners):
def __init__(self, core: Core, device: InputDevice):
self.core = core
self.device = device
self.qtile = core.qtile
self.seat = core.seat
self.keyboard = device.keyboard
self.keyboard.data = self
self.grabbed_keys = core.grabbed_keys
self.keyboard.set_repeat_info(25, 600)
self.xkb_context = xkb.Context()
self._keymaps: dict[tuple[str | None, str | None, str | None], xkb.Keymap] = {}
self.set_keymap(None, None, None)
self.add_listener(self.keyboard.modifiers_event, self._on_modifier)
self.add_listener(self.keyboard.key_event, self._on_key)
self.add_listener(self.keyboard.destroy_event, self._on_destroy)
def finalize(self) -> None:
self.finalize_listeners()
self.core.keyboards.remove(self)
if self.core.keyboards and self.core.seat.keyboard.destroyed:
self.seat.set_keyboard(self.core.keyboards[-1].device)
def set_keymap(self, layout: str | None, options: str | None, variant: str | None) -> None:
"""
Set the keymap for this keyboard.
"""
if (layout, options, variant) in self._keymaps:
keymap = self._keymaps[(layout, options, variant)]
else:
keymap = self.xkb_context.keymap_new_from_names(
layout=layout, options=options, variant=variant
)
self._keymaps[(layout, options, variant)] = keymap
self.keyboard.set_keymap(keymap)
def _on_destroy(self, _listener: Listener, _data: Any) -> None:
logger.debug("Signal: keyboard destroy")
self.finalize()
def _on_modifier(self, _listener: Listener, _data: Any) -> None:
self.seat.set_keyboard(self.device)
self.seat.keyboard_notify_modifiers(self.keyboard.modifiers)
def _on_key(self, _listener: Listener, event: KeyboardKeyEvent) -> None:
if self.qtile is None:
# shushes mypy
self.qtile = self.core.qtile
assert self.qtile is not None
self.core.idle.notify_activity(self.seat)
if event.state == KEY_PRESSED:
# translate libinput keycode -> xkbcommon
keycode = event.keycode + 8
layout_index = lib.xkb_state_key_get_layout(self.keyboard._ptr.xkb_state, keycode)
nsyms = lib.xkb_keymap_key_get_syms_by_level(
self.keyboard._ptr.keymap,
keycode,
layout_index,
0,
xkb_keysym,
)
keysyms = [xkb_keysym[0][i] for i in range(nsyms)]
mods = self.keyboard.modifier
for keysym in keysyms:
if (keysym, mods) in self.grabbed_keys:
self.qtile.process_key_event(keysym, mods)
return
if self.core.focused_internal:
self.core.focused_internal.process_key_press(keysym)
return
self.seat.keyboard_notify_key(event)
def _configure_keyboard(device: InputDevice, conf: InputConfig) -> None:
"""Applies ``InputConfig`` rules to a keyboard device"""
device.keyboard.set_repeat_info(conf.kb_repeat_rate, conf.kb_repeat_delay)
if isinstance(device.keyboard.data, Keyboard):
device.keyboard.data.set_keymap(conf.kb_layout, conf.kb_options, conf.kb_variant)
else:
logger.error("Couldn't configure keyboard. Please report this.")
_logged_unsupported = False
def _configure_pointer(device: InputDevice, conf: InputConfig, name: str) -> None:
"""Applies ``InputConfig`` rules to a pointer device"""
handle = device.libinput_get_device_handle()
if handle is None:
logger.debug(f"Device not handled by libinput: {name}")
return
if libinput is None:
global _logged_unsupported
if not _logged_unsupported:
logger.error(
"Qtile was not built with libinput configuration support. "
"For support, pywlroots must be installed at build time."
)
_logged_unsupported = True
return
if libinput.libinput_device_config_accel_is_available(handle):
if ACCEL_PROFILES.get(conf.accel_profile):
libinput.libinput_device_config_accel_set_profile(
handle, ACCEL_PROFILES.get(conf.accel_profile)
)
if conf.pointer_accel is not None:
libinput.libinput_device_config_accel_set_speed(handle, conf.pointer_accel)
if CLICK_METHODS.get(conf.click_method):
libinput.libinput_device_config_click_set_method(
handle, CLICK_METHODS.get(conf.click_method)
)
if conf.drag is not None:
libinput.libinput_device_config_tap_set_drag_enabled(handle, int(conf.drag))
if conf.drag_lock is not None:
libinput.libinput_device_config_tap_set_drag_lock_enabled(handle, int(conf.drag_lock))
if conf.dwt is not None:
if libinput.libinput_device_config_dwt_is_available(handle):
libinput.libinput_device_config_dwt_set_enabled(handle, int(conf.dwt))
if conf.left_handed is not None:
if libinput.libinput_device_config_left_handed_is_available(handle):
libinput.libinput_device_config_left_handed_set(handle, int(conf.left_handed))
if conf.middle_emulation is not None:
libinput.libinput_device_config_middle_emulation_set_enabled(
handle, int(conf.middle_emulation)
)
if conf.natural_scroll is not None:
if libinput.libinput_device_config_scroll_has_natural_scroll(handle):
libinput.libinput_device_config_scroll_set_natural_scroll_enabled(
handle, int(conf.natural_scroll)
)
if SCROLL_METHODS.get(conf.scroll_method):
libinput.libinput_device_config_scroll_set_method(
handle, SCROLL_METHODS.get(conf.scroll_method)
)
if conf.scroll_method == "on_button_down":
if isinstance(conf.scroll_button, str):
if conf.scroll_button == "disable":
button = 0
else: # e.g. Button1
button = buttons[int(conf.scroll_button[-1]) - 1]
else:
button = conf.scroll_button
libinput.libinput_device_config_scroll_set_button(handle, button)
if libinput.libinput_device_config_tap_get_finger_count(handle) > 1:
if conf.tap is not None:
libinput.libinput_device_config_tap_set_enabled(handle, int(conf.tap))
if conf.tap_button_map is not None:
if TAP_MAPS.get(conf.tap_button_map):
libinput.libinput_device_config_tap_set_button_map(
handle, TAP_MAPS.get(conf.tap_button_map)
)
def configure_device(device: InputDevice, configs: dict[str, InputConfig]) -> None:
if not configs:
return
# Find a matching InputConfig
name = device.name
if name == " " or not name.isprintable():
name = "_"
identifier = "%d:%d:%s" % (device.vendor, device.product, name)
type_key = "type:" + device.device_type.name.lower()
if type_key == "type:pointer":
# This checks whether the pointer is a touchpad.
        handle = device.libinput_get_device_handle()
        if handle and libinput and libinput.libinput_device_config_tap_get_finger_count(handle) > 0:
type_key = "type:touchpad"
if identifier in configs:
conf = configs[identifier]
elif type_key in configs:
conf = configs[type_key]
elif "*" in configs:
conf = configs["*"]
else:
return
if device.device_type == input_device.InputDeviceType.POINTER:
_configure_pointer(device, conf, name)
elif device.device_type == input_device.InputDeviceType.KEYBOARD:
_configure_keyboard(device, conf)
else:
logger.warning("Device not configured. Type '%s' not recognised.", device.device_type)
|
|
# -*- coding: utf-8 -*-
from typing import NamedTuple, Tuple
import pytest
import numpy as np
class UnivariateData(NamedTuple):
x: np.ndarray
y: np.ndarray
xi: np.ndarray
yi: np.ndarray
yi_d1: np.ndarray
yi_d2: np.ndarray
yi_ad1: np.ndarray
integral: float
smooth: float
@pytest.fixture(scope='session')
def univariate_data() -> UnivariateData:
"""Univariate exponential noisy data sample
"""
x = np.linspace(-5., 5., 25)
y = np.array([
0.015771474002402, 0.161329316958106, 0.133494845724251, 0.281006799675995, 0.343006057841707,
0.278153538271205, 0.390818717714371, 0.679913441859782, 0.868622194535066, 0.981580573494033,
0.942184497801730, 1.062903014773386, 1.145038880551641, 1.126415085211218, 0.945914543251488,
0.887159638891092, 0.732105338022297, 0.443482323476481, 0.539727427655155, 0.461168113877247,
0.218479110576478, 0.230018078091912, 0.024790896515009, 0.085343887446749, 0.238257669483491,
])
xi = np.linspace(-5., 5., 100)
yi = np.array([
0.027180620841235, 0.055266722842603, 0.081889893919483, 0.105587203147386, 0.124895719601823,
0.138845028755704, 0.149340839533796, 0.159329894062361, 0.171760370375527, 0.189200881318870,
0.210916416800576, 0.234470952365328, 0.257414405587860, 0.277378327575793, 0.293102526279226,
0.304125512134026, 0.310003428419162, 0.310378253365020, 0.306866169084541, 0.303057561573221,
0.302628651777970, 0.309224604926640, 0.325083877194873, 0.350493015304832, 0.385594789501554,
0.430522770904909, 0.484297436489629, 0.543777468816333, 0.605573174066145, 0.666295736381613,
0.723192861937517, 0.775270813640449, 0.821836995165352, 0.862198810187169, 0.895800934807012,
0.922637134830661, 0.942838448490065, 0.956535914017174, 0.964201067822575, 0.968293836555378,
0.971993858758319, 0.978481765680190, 0.990589029304687, 1.008108142826666, 1.029266848660349,
1.052279957395322, 1.075372085392223, 1.096900972159461, 1.115320296869396, 1.129085856740936,
1.136683629726760, 1.137293750656333, 1.130790511235904, 1.117078383905495, 1.096086194963815,
1.068845910308547, 1.037920180955498, 1.005984407266240, 0.975701613072527, 0.948237262097737,
0.921848333557257, 0.894457640361302, 0.863988793631958, 0.828944108099870, 0.789424716879042,
0.745805539756215, 0.698461496518133, 0.648486500475627, 0.599850439035886, 0.557242193130187,
0.525350643689810, 0.507735527292642, 0.501362772528695, 0.500811632605449, 0.500658068764342,
0.495835799645429, 0.484392358284163, 0.465978560872775, 0.440258473877521, 0.407065767289394,
0.368536648551231, 0.328466725991911, 0.290688242301654, 0.258885509945327, 0.233340446204702,
0.210932573178451, 0.188393482739897, 0.162519263868190, 0.133027485558460, 0.103689479346398,
0.078575174479879, 0.061741087177262, 0.055620757085748, 0.059495661916820, 0.072285127585087,
0.092908288203052, 0.120145221354215, 0.152391824997807, 0.187978208969687, 0.225234483105709,
])
yi_d1 = np.array([
0.280466245838951, 0.273224737762734, 0.251500213534080, 0.215292673152991, 0.164602116619466,
0.116145957650883, 0.096535589511642, 0.106112183910259, 0.144875740846734, 0.197795478854448,
0.228129487681658, 0.234207680498741, 0.216030057305698, 0.177475843585326, 0.133130927152951,
0.084391829182380, 0.031258549673615, -0.021170315259270, -0.042303188931811, -0.027041475229932,
0.024614825846369, 0.110206101347581, 0.204043060467737, 0.299293445013758, 0.395957254985645,
0.492941634313625, 0.566203749462694, 0.605907895804895, 0.612054073340229, 0.584850885702982,
0.540569416367585, 0.489431243414127, 0.431436366842608, 0.366584786653029, 0.298953493902567,
0.232619479648399, 0.167582743890526, 0.103843286628946, 0.052958588898598, 0.033326273571420,
0.045182207607307, 0.088526391006262, 0.149532116870303, 0.194400768337705, 0.221596044642023,
0.231117945783259, 0.223432442001698, 0.200291581400812, 0.161863113267105, 0.108147037600576,
0.040936089987355, -0.029013316055774, -0.099908444942684, -0.171749296673372, -0.242607045639946,
-0.292336179937763, -0.315578850656009, -0.312335057794686, -0.284075133096558, -0.263146374902438,
-0.262781768897203, -0.282981315080855, -0.323555358312161, -0.369710797575063, -0.412154530949217,
-0.450886558434622, -0.485906880031279, -0.495861388073734, -0.459395974896536, -0.376510640499682,
-0.247205384883174, -0.109806593527812, -0.025323614749973, 0.005461380318084, -0.017451608323641,
-0.079895387689795, -0.147239041593955, -0.217908437482194, -0.291903575354511, -0.361164895715347,
-0.395388454861403, -0.391672811374275, -0.350017965253966, -0.279210971886386, -0.231974163587015,
-0.217094595741764, -0.234572268350635, -0.279299010631809, -0.297922816641862, -0.276254323097967,
-0.214293530000122, -0.113633905254560, -0.009331742798295, 0.084271746212620, 0.167176561778182,
0.239337131717303, 0.297198825904872, 0.338528607467420, 0.363326476404950, 0.371592432717459,
])
yi_d2 = np.array([
0.0, -0.143381859909114, -0.286763719818227, -0.430145579727340, -0.573527439636453, -0.338618109140893,
-0.049667180016091, 0.239283749108707, 0.528234678233509, 0.420372976639513, 0.180240398139254,
-0.059892180361004, -0.300024758861262, -0.417263474066444, -0.460765871294580, -0.504268268522715,
-0.547770665750851, -0.389368656861538, -0.029062241854775, 0.331244173151988, 0.691550588158752,
0.921989439431505, 0.935982351147577, 0.949975262863648, 0.963968174579720, 0.891406885569776,
0.559182994381786, 0.226959103193797, -0.105264787994194, -0.404445863511035, -0.472327229329834,
-0.540208595148634, -0.608089960967433, -0.675971326786232, -0.663128269672918, -0.650285212559604,
-0.637442155446290, -0.624599098332976, -0.350226756086249, -0.038493087391889, 0.273240581302470,
0.584974249996826, 0.531682856584531, 0.356716442470013, 0.181750028355495, 0.006783614240977,
-0.153420862661300, -0.304768177236235, -0.456115491811170, -0.607462806386105, -0.687817791750278,
-0.697180447903692, -0.706543104057105, -0.715905760210518, -0.623426424267251, -0.361210434829511,
-0.098994445391772, 0.163221544045968, 0.308987259456777, 0.105402152786806, -0.098182953883166,
-0.301768060553138, -0.475311792852036, -0.438565904553427, -0.401820016254817, -0.365074127956207,
-0.328328239657597, 0.131228980416978, 0.590786200491558, 1.050343420566138, 1.509900640640717,
1.102186509264039, 0.570576470537193, 0.038966431810342, -0.492643606916509, -0.650239750830002,
-0.683164596472375, -0.716089442114750, -0.749014287757124, -0.526612288580197, -0.151014182511690,
0.224583923556814, 0.600182029625318, 0.627812742411677, 0.307476061915880, -0.012860618579924,
-0.333197299075721, -0.383822562291567, 0.015071203292524, 0.413964968876607, 0.812858734460697,
1.085549841871512, 0.979632974762537, 0.873716107653561, 0.767799240544586, 0.654663739950773,
0.490997804963078, 0.327331869975386, 0.163665934987692, 0.0,
])
yi_ad1 = np.array([
0, 0.004170164373445, 0.011115737580173, 0.020615063419503, 0.032298714890302, 0.045660376813401,
0.060231892427910, 0.075813180885901, 0.092501952567935, 0.110686524880963, 0.130868677637032,
0.153357821207108, 0.178215971638707, 0.205258950314118, 0.234108819065114, 0.264313291890776,
0.295375081800281, 0.326753151565222, 0.357945080592172, 0.388736333089680, 0.419282624940839,
0.450110713389674, 0.482066710123764, 0.516105768238528, 0.553199731265708, 0.594334805898634,
0.640475555315434, 0.692364771746437, 0.750407558205033, 0.814666627160915, 0.884880469463989,
0.960603933483209, 1.041315254368523, 1.126422708045129, 1.215268077688877, 1.307164784955765,
1.401436021211426, 1.497418214006542, 1.594468550710834, 1.692085995899572, 1.790070243397785,
1.888542259780677, 1.987937674642304, 2.088843826542736, 2.191718430510039, 2.296838961049921,
2.404302725268220, 2.514033161345466, 2.625794182200862, 2.739193700617453, 2.853684030369255,
2.968590847531448, 3.083200836550669, 3.196790651555201, 3.308627528048506, 3.418009815729590,
3.524431905730442, 3.627656652140256, 3.727717073374330, 3.824867908686134, 3.919316366135970,
4.011066165813679, 4.099911213450951, 4.185452118972609, 4.267224006141539, 4.344793819826894,
4.417766375583358, 4.485802516053269, 4.548818831389975, 4.607187379681194, 4.661753843351136,
4.713812579632492, 4.764705308291650, 4.815294002191847, 4.865892761884743, 4.916274580927416,
4.965838312719173, 5.013896930882124, 5.059729392707482, 5.102583399629591, 5.141784337510146,
5.176983368905221, 5.208218404675820, 5.235912583098308, 5.260732316617858, 5.283157696497049,
5.303340539569319, 5.321103131201137, 5.336045569661458, 5.347982548219602, 5.357135151392904,
5.364135482984910, 5.369974165591079, 5.375708539464273, 5.382293644794382, 5.390575362167333,
5.401286443327005, 5.415015799831229, 5.432185121115394, 5.453047420840733,
])
integral = 5.453047420840733
smooth = 0.992026535689226
return UnivariateData(x, y, xi, yi, yi_d1, yi_d2, yi_ad1, integral, smooth)
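# Sketch of a consistency check (not part of the original fixtures): the last
# sample of the antiderivative ``yi_ad1`` equals ``integral``, and a plain
# trapezoidal sum of ``yi`` over ``xi`` lands very close to it.
def _check_univariate_data(data: UnivariateData) -> None:
    assert data.yi_ad1[-1] == pytest.approx(data.integral)
    trapz = np.sum(np.diff(data.xi) * (data.yi[:-1] + data.yi[1:]) / 2)
    assert trapz == pytest.approx(data.integral, rel=1e-2)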
class NdGrid2dData(NamedTuple):
xy: Tuple[np.ndarray, np.ndarray]
z: np.ndarray
zi: np.ndarray
zi_d1: np.ndarray
zi_d2: np.ndarray
zi_ad1: np.ndarray
integral: float
smooth: Tuple[float, float]
@pytest.fixture(scope='session')
def ndgrid_2d_data() -> NdGrid2dData:
xy = (np.linspace(-3.0, 3.0, 5), np.linspace(-3.5, 3.5, 7))
z = np.array(
[[-0.153527183790132, 0.360477327564227, -0.400800187993851, -0.661751768834967,
1.39827150034968, 1.044246054228617, 0.069681364921588],
[0.211217178485871, 0.592683030706752, 1.294599451385471, -4.924883983709012,
-2.641771353280953, 0.245330967159293, 0.171928943618129],
[1.012132959440344, 0.132792505223302, -3.970096642307903, 0.702129940268655,
4.729521910567126, 0.208213433055832, -0.40275495492284],
[0.35749708646856, 2.409904780478664, 0.892801916808247, 7.563804764350773,
2.510824654279176, 0.317303593544217, 0.393080231785911],
[0.000706314884567, 1.009080744382149, -0.45874273220015, -0.323494125914201,
-1.700362064179427, -1.394209767885332, -0.645566364768713]
])
zi = np.array(
[[-0.055377680470166, 0.195656616225213, -0.295030253111251, -0.830533929888634,
0.193176060095987, 0.770374649757329, 0.252865339751650],
[0.471994652733459, 0.293417006151304, -0.758106516247562, -1.960431309380293,
-0.781936045165379, 0.216341632490716, 0.180333235920312],
[0.875919224697303, 0.067344259041702, -0.735889940425535, 0.882313890047783,
2.056305063365266, 0.896850201038262, -0.190314083560006],
[0.606245376082951, 0.941947682137626, 1.225331206624579, 3.379540894700002,
2.581257432070516, 0.581783850872262, -0.187728390603794],
[0.183397630824828, 0.805748594104382, 0.503605325241657, 0.264260721868410,
-0.874052860773297, -1.188420383689933, -0.617919628357980]
])
zi_d1 = np.array(
[[-0.121812472223565, -0.157326929297246, -0.828962829944406, -0.640465943383180,
0.503884652683063, 0.662549465069741, 0.501482011524879],
[-0.470497745190257, -0.466533613272337, 0.403062598673842, 0.767903075058928,
-0.334118185393139, -0.545086449738045, -0.154825057274055],
[0.194607321700336, 0.192434581435788, 1.293210437573223, 0.562712202846881,
-1.370259457802591, -0.823157992133989, -0.176531696944242],
[0.607431089728563, 0.510938352696447, -0.237734672001804, -1.058160086418725,
-0.269494123744448, 0.632430110330214, 0.577443254979813],
[0.006464517899505, -0.138732754726110, -1.426802191857966, -1.065472485030174,
1.029409644646945, 1.158453840382574, 0.696817777775121]
])
zi_d2 = np.array(
[[0., 0., 0., 0., 0., 0., 0.],
[0., 0.090236774837945, 3.432579482518258, -3.029508420063717,
-2.105055823406707, 1.260180219448802, 0.],
[0., -0.104263911255016, -2.890141730806884, -0.016560671330926,
3.251809714350141, -0.674203298932788, 0.],
[0., -0.111324652785139, -1.121581432777390, 3.822735995622450,
-0.837099042473098, -0.929483902301833, 0.],
[0., 0., 0., 0., 0., 0., 0.]
])
zi_ad1 = np.array(
[[0., 0., 0., 0., 0., 0., 0.],
[0., 0.509395864624591, 0.476401125691113, -1.598603993938397,
-3.760699480840437, -3.750794105947393, -3.076469947082991],
[0., 1.318187655218603, 0.563946491827388, -3.255188072198998,
-5.625801866969895, -4.504932326883326, -3.364137445816383],
[0., 2.511070517473814, 2.161223166080562, 0.664448782633726,
3.261109214575526, 7.566477396430049, 9.182402413317154],
[0., 3.799550963870154, 5.127806810771760, 6.567161731071815,
12.269550101243563, 17.158134933110624, 18.133537415972746]
])
integral = 18.133537415972746
smooth = (0.727272727272727, 0.850021862702230)
return NdGrid2dData(xy, z, zi, zi_d1, zi_d2, zi_ad1, integral, smooth)
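# Analogous sketch for the 2-d grid data (not part of the original fixtures):
# the last corner of the cumulative integral ``zi_ad1`` equals ``integral``.
def _check_ndgrid_2d_data(data: NdGrid2dData) -> None:
    assert data.zi_ad1[-1, -1] == pytest.approx(data.integral)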
class SurfaceData(NamedTuple):
xy: Tuple[np.ndarray, np.ndarray]
z: np.ndarray
@pytest.fixture(scope='session')
def surface_data() -> SurfaceData:
"""2-d surface data sample
"""
np.random.seed(12345)
xy = (np.linspace(-3.0, 3.0, 61), np.linspace(-3.5, 3.5, 51))
i, j = np.meshgrid(*xy, indexing='ij')
z = (3 * (1 - j) ** 2. * np.exp(-(j ** 2) - (i + 1) ** 2)
- 10 * (j / 5 - j ** 3 - i ** 5) * np.exp(-j ** 2 - i ** 2)
- 1 / 3 * np.exp(-(j + 1) ** 2 - i ** 2))
z += (np.random.randn(*z.shape) * 0.75)
return SurfaceData(xy, z)
|
|
"""Built-in Source baseclasses. In order of increasing functionality and decreasing generality:
* Source: only sets up default arguments and helper functions for caching.
Use e.g. if you have an analytic pdf
* HistogramPdfSource: + fetch/interpolate the PDF/PMF from a (multihist) histogram
Use e.g. if you have a numerically computable pdf (e.g. using convolution of some known functions)
* DensityEstimatingSource: + create that histogram by binning some sample of events
Use e.g. if you want to estimate density from a calibration data sample.
* MonteCarloSource: + get that sample from the source's own simulate method.
Use if you have a Monte Carlo to generate events. This was the original 'niche' for which blueice was created.
Parent methods (e.g. Source.compute_pdf) are meant to be called at the end of the child methods
that override them (e.g. HistogramPdfSource.compute_pdf).
"""
import inspect
import os
from functools import reduce
import numpy as np
from blueice.exceptions import PDFNotComputedException
from multihist import Histdd
from scipy.interpolate import RegularGridInterpolator
from . import utils
from .data_reading import read_files_in
__all__ = ['Source', 'HistogramPdfSource', 'DensityEstimatingSource', 'MonteCarloSource']
class Source(object):
"""Base class for a source of events."""
# Class-level cache for loaded sources
# Useful so we don't create possibly expensive objects
_data_cache = dict()
def __repr__(self):
return "%s[%s]" % (self.name, self.hash if hasattr(self, 'hash') else 'nohashknown')
def __init__(self, config, *args, **kwargs):
defaults = dict(name='unnamed_source',
label='Unnamed source',
color='black', # Color to use in plots
                        # Defaults for events_per_day and fraction_in_range. These immediately get converted into
                        # attributes, which can be modified dynamically. Not only can they be overridden in config,
                        # some child classes also set them dynamically (e.g. DensityEstimatingSource sets them based
                        # on the sample events you pass in).
events_per_day=0, # Events per day this source produces (detected or not).
rate_multiplier=1, # Rate multiplier (independent of loglikelihood's rate multiplier)
fraction_in_range=1, # Fraction of simulated events that fall in analysis space.
# List of attributes you want to be stored in cache. When the same config is passed later
# (ignoring the dont_hash_settings), these attributes will be set from the cached file.
cache_attributes=[],
# Set to True if you want to call compute_pdf at a time of your convenience, rather than
# at the end of init.
delay_pdf_computation=False,
# List of names of settings which are not included in the hash. These should be all settings
# that have no impact on the pdf (e.g. whether to show progress bars or not).
dont_hash_settings=[],
extra_dont_hash_settings=[],
# If true, never retrieve things from the cache. Saving to cache still occurs.
force_recalculation=False,
# If true, never save things to the cache. Loading from cache still occurs.
never_save_to_cache=False,
cache_dir='pdf_cache',
task_dir='pdf_tasks')
c = utils.combine_dicts(defaults, config)
c['cache_attributes'] += ['fraction_in_range', 'events_per_day', 'pdf_has_been_computed']
c['dont_hash_settings'] += ['hash', 'rate_multiplier',
'force_recalculation', 'never_save_to_cache', 'dont_hash_settings',
'label', 'color', 'extra_dont_hash_settings', 'delay_pdf_computation',
'cache_dir', 'task_dir']
# Merge the 'extra' (per-source) dont hash settings into the normal dont_hash_settings
c['dont_hash_settings'] += c['extra_dont_hash_settings']
del c['extra_dont_hash_settings']
self.name = c['name']
del c['name']
# events_per_day and fraction_in_range may be modified / set properly for the first time later (see comments
# in 'defaults' above)
if hasattr(self, 'events_per_day'):
raise ValueError("events_per_day defaults should be set via config!")
self.events_per_day = c['events_per_day']
self.fraction_in_range = c['fraction_in_range']
self.pdf_has_been_computed = False
# What is this source's unique id?
if 'hash' in c:
# id already given in config: probably because config has already been 'pimped' with loaded objects
self.hash = c['hash']
else:
# Compute id from config
hash_config = utils.combine_dicts(c, exclude=c['dont_hash_settings'])
self.hash = c['hash'] = utils.deterministic_hash(hash_config)
# What filename would a source with this config have in the cache?
if not os.path.exists(c['cache_dir']):
os.makedirs(c['cache_dir'])
self._cache_filename = os.path.join(c['cache_dir'], self.hash)
# Can we load this source from cache? If so, do so: we don't even need to load any files...
if not c['force_recalculation'] and os.path.exists(self._cache_filename):
self.from_cache = True
if self.hash in self._data_cache:
# We already loaded this from cache sometime in this process
stuff = self._data_cache[self.hash]
else:
# Load it from disk, and store in the class-level cache
stuff = self._data_cache[self.hash] = utils.read_pickle(self._cache_filename)
for k, v in stuff.items():
if k not in c['cache_attributes']:
raise ValueError("%s found in cached file, but you only wanted %s from cache. "
"Old cache?" % (k, c['cache_attributes']))
setattr(self, k, v)
else:
self.from_cache = False
# Convert any filename-valued settings to whatever is in those files.
c = read_files_in(c, config['data_dirs'])
self.config = c
if self.from_cache:
assert self.pdf_has_been_computed
else:
if self.config['delay_pdf_computation']:
self.prepare_task()
else:
self.compute_pdf()
def compute_pdf(self):
"""Initialize, then cache the PDF. This is called
* AFTER the config initialization and
* ONLY when source is not already loaded from cache. The caching mechanism exists to store the quantities you
need to compute here.
"""
if self.pdf_has_been_computed:
raise RuntimeError("compute_pdf called twice on a source!")
self.pdf_has_been_computed = True
self.save_to_cache()
def save_to_cache(self):
"""Save attributes in self.config['cache_attributes'] of this source to cache."""
if not self.from_cache and not self.config['never_save_to_cache']:
utils.save_pickle({k: getattr(self, k) for k in self.config['cache_attributes']},
self._cache_filename)
return self._cache_filename
def prepare_task(self):
"""Create a task file in the task_dir for delayed/remote computation"""
task_filename = os.path.join(self.config['task_dir'], self.hash)
utils.save_pickle((self.__class__, self.config), task_filename)
def pdf(self, *args):
raise NotImplementedError
def get_pmf_grid(self, *args):
"""Returns pmf_grid, n_events:
- pmf_grid: pmf per bin in the analysis space
- n_events: if events were used for density estimation: number of events per bin (for DensityEstimatingSource)
otherwise float('inf')
        This is used by binned likelihoods. If you have an unbinned density estimator, you'll have to write
some integration / sampling routine!
"""
raise NotImplementedError
def simulate(self, n_events):
"""Simulate n_events according to the source. It's ok to return less than n_events events,
if you decide some events are not detectable.
"""
raise NotImplementedError
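# Hedged sketch (not part of the original module): the kind of config dict a
# Source subclass typically receives. The keys mirror the defaults documented
# in Source.__init__; 'analysis_space' and 'data_dirs' are consumed elsewhere
# in blueice, and the concrete values shown here are purely illustrative.
_EXAMPLE_SOURCE_CONFIG = dict(
    name='example_source',
    label='Example source',
    events_per_day=42,
    analysis_space=[('energy', np.linspace(0, 100, 101))],
    data_dirs=[],                   # searched when settings are filenames
    force_recalculation=False,      # True: ignore any cached PDF
    delay_pdf_computation=False,    # True: call compute_pdf() yourself later
    dont_hash_settings=['label'],   # settings that must not affect the cache hash
)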
class HistogramPdfSource(Source):
"""A source which takes its PDF values from a multihist histogram.
"""
_pdf_histogram = None
_bin_volumes = None
_n_events_histogram = None
def __init__(self, config, *args, **kwargs):
"""Prepares the PDF of this source for use.
"""
defaults = dict(pdf_sampling_multiplier=1,
pdf_interpolation_method='linear',)
config = utils.combine_dicts(defaults, config)
config['cache_attributes'] = config.get('cache_attributes', []) + \
['_pdf_histogram', '_n_events_histogram', '_bin_volumes']
Source.__init__(self, config, *args, **kwargs)
def build_histogram(self):
"""Set the _pdf_histogram (Histdd), _n_events_histogram (Histdd) and _bin_volumes (numpy array) attributes
"""
raise NotImplementedError
def compute_pdf(self):
# Fill the histogram with either events or an evaluated pdf
self.build_histogram()
Source.compute_pdf(self)
def pdf(self, *args):
if not self.pdf_has_been_computed:
raise PDFNotComputedException("%s: Attempt to call a PDF that has not been computed" % self)
method = self.config['pdf_interpolation_method']
if method == 'linear':
if not hasattr(self, '_pdf_interpolator'):
# First call:
# Construct a linear interpolator between the histogram bins
self._pdf_interpolator = RegularGridInterpolator(self._pdf_histogram.bin_centers(),
self._pdf_histogram.histogram)
# The interpolator works only within the bin centers region: clip the input data to that.
# Assuming you've cut the data to the analysis space first (which you should have!)
# this is equivalent to assuming constant density in the outer half of boundary bins
clipped_data = []
for dim_i, x in enumerate(args):
bcs = self._pdf_histogram.bin_centers(dim_i)
clipped_data.append(np.clip(x, bcs.min(), bcs.max()))
return self._pdf_interpolator(np.transpose(clipped_data))
elif method == 'piecewise':
return self._pdf_histogram.lookup(*args)
else:
raise NotImplementedError("PDF Interpolation method %s not implemented" % method)
def simulate(self, n_events):
"""Simulate n_events from the PDF histogram"""
if not self.pdf_has_been_computed:
raise PDFNotComputedException("%s: Attempt to simulate events from a PDF that has not been computed" % self)
events_per_bin = self._pdf_histogram * self._bin_volumes
q = events_per_bin.get_random(n_events)
# Convert to numpy record array
        d = np.zeros(n_events,
                     dtype=[('source', int)] +
                           [(x[0], float)
                            for x in self.config['analysis_space']])
for i, x in enumerate(self.config['analysis_space']):
d[x[0]] = q[:, i]
return d
def get_pmf_grid(self):
return self._pdf_histogram.histogram * self._bin_volumes, self._n_events_histogram.histogram
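# Sketch (not part of the original module) of the clipping behaviour described
# in HistogramPdfSource.pdf: a RegularGridInterpolator built on bin centers only
# covers the region between the outermost centers, so query points are clipped
# to that region, i.e. the density is treated as constant in the outer half of
# the boundary bins. Plain numpy/scipy; no blueice objects involved.
def _example_bin_center_interpolation():
    bin_edges = np.linspace(0, 1, 11)
    bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
    densities = np.exp(-bin_centers)                      # stand-in for a pdf histogram
    itp = RegularGridInterpolator((bin_centers,), densities)
    x = np.array([0.0, 0.03, 0.5, 0.99])                  # 0.0 and 0.99 fall outside the centers
    x_clipped = np.clip(x, bin_centers.min(), bin_centers.max())
    return itp(x_clipped[:, np.newaxis])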
class DensityEstimatingSource(HistogramPdfSource):
"""A source which estimates its PDF by some events you give to it.
Child classes need to implement get_events_for_density_estimate, and call compute_pdf when they are ready
(usually at the end of their own init).
"""
def __init__(self, config, *args, **kwargs):
"""Prepares the PDF of this source for use.
"""
defaults = dict(n_events_for_pdf=1e6)
config = utils.combine_dicts(defaults, config)
config['cache_attributes'] = config.get('cache_attributes', [])
HistogramPdfSource.__init__(self, config, *args, **kwargs)
def build_histogram(self):
# Get the events to estimate the PDF
dimnames, bins = zip(*self.config['analysis_space'])
mh = Histdd(bins=bins, axis_names=dimnames)
# Get a generator function which will give us the events
get = self.get_events_for_density_estimate
if not inspect.isgeneratorfunction(get):
def get():
return [self.get_events_for_density_estimate()]
n_events = 0
for events, n_simulated in get():
n_events += n_simulated
mh.add(*utils._events_to_analysis_dimensions(events, self.config['analysis_space']))
self.fraction_in_range = mh.n / n_events
# Convert the histogram to a density estimate
# This means we have to divide by
# - the number of events IN RANGE received
# (fraction_in_range keeps track of how many events were not in range)
# - the bin sizes
self._pdf_histogram = mh.similar_blank_hist()
        self._pdf_histogram.histogram = mh.histogram.astype(float) / mh.n
# For the bin widths we need to take an outer product of several vectors, for which numpy has no builtin
# This reduce trick does the job instead, see http://stackoverflow.com/questions/17138393
self._bin_volumes = reduce(np.multiply, np.ix_(*[np.diff(bs) for bs in bins]))
self._pdf_histogram.histogram /= self._bin_volumes
self._n_events_histogram = mh
return mh
def get_events_for_density_estimate(self):
"""Return, or yield in batches, (events for use in density estimation, events simulated/read)
Passing the count is necessary because you sometimes work with simulators that already cut some events.
"""
raise NotImplementedError
class MonteCarloSource(DensityEstimatingSource):
"""A DensityEstimatingSource which gets the events for the density estimator from its own simulate() method.
Child classes have to implement simulate.
"""
def __init__(self, config, *args, **kwargs):
defaults = dict(n_events_for_pdf=1e6,
pdf_sampling_multiplier=1,
pdf_sampling_batch_size=1e6)
config = utils.combine_dicts(defaults, config)
config['dont_hash_settings'] = config.get('dont_hash_settings', []) + ['pdf_sampling_batch_size']
DensityEstimatingSource.__init__(self, config, *args, **kwargs)
def get_events_for_density_estimate(self):
# Simulate batches of events at a time (to avoid memory errors, show a progressbar, and split up among machines)
        # The number of events to simulate is rounded down to a whole number of batches
        # (a single smaller batch is used if fewer events than one batch are requested)
        n_events = int(self.config['n_events_for_pdf'] * self.config['pdf_sampling_multiplier'])
        batch_size = int(self.config['pdf_sampling_batch_size'])
        if n_events <= batch_size:
            batch_size = n_events
for _ in range(int(n_events // batch_size)):
result = self.simulate(n_events=batch_size)
yield result, batch_size
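# Hedged sketch (not part of the original module): the minimal shape of a
# MonteCarloSource child class. Only ``simulate`` is implemented; the field
# name 'energy' and the exponential toy spectrum are purely illustrative.
# Instantiating it still requires a full blueice config (analysis_space,
# data_dirs, ...), which is not shown here.
class _ExampleMonteCarloSource(MonteCarloSource):
    def simulate(self, n_events):
        n_events = int(n_events)
        d = np.zeros(n_events, dtype=[('source', int), ('energy', float)])
        d['energy'] = np.random.exponential(scale=10, size=n_events)
        return d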
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import sys
import os
import io
import getopt
import unittest
import doctest
import inspect
from multiprocessing import Process, Queue
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import coverage
VERBOSE = 2
def create_examples_testsuite():
# gather information on examples
# all functions inside the examples starting with 'ex_' or 'recipe_'
# are considered as tests
# find example files in examples directory
root_dir = 'examples/'
files = []
skip = ['__init__.py']
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-3:] != '.py':
continue
if 'examples/data' in root:
continue
f = os.path.join(root, filename)
f = f.replace('/', '.')
f = f[:-3]
files.append(f)
# create empty testsuite
suite = unittest.TestSuite()
# find matching functions in
for idx, module in enumerate(files):
module1, func = module.split('.')
module = __import__(module)
func = getattr(module, func)
funcs = inspect.getmembers(func, inspect.isfunction)
[suite.addTest(unittest.FunctionTestCase(v))
for k, v in funcs if k.startswith(("ex_", "recipe_"))]
return suite
class NotebookTest(unittest.TestCase):
def __init__(self, nbfile, cov):
setattr(self.__class__, nbfile, staticmethod(self._runTest))
super(NotebookTest, self).__init__(nbfile)
self.nbfile = nbfile
self.cov = cov
def _runTest(self):
kernel = 'python%d' % sys.version_info[0]
cur_dir = os.path.dirname(self.nbfile)
with open(self.nbfile) as f:
nb = nbformat.read(f, as_version=4)
if self.cov:
covdict = {'cell_type': 'code', 'execution_count': 1,
'metadata': {'collapsed': True}, 'outputs': [],
'nbsphinx': 'hidden',
'source': 'import coverage\n'
'coverage.process_startup()\n'
'import sys\n'
'sys.path.append("{0}")\n'.format(cur_dir)
}
nb['cells'].insert(0, nbformat.from_dict(covdict))
exproc = ExecutePreprocessor(kernel_name=kernel, timeout=600)
try:
run_dir = os.getenv('WRADLIB_BUILD_DIR', cur_dir)
exproc.preprocess(nb, {'metadata': {'path': run_dir}})
except CellExecutionError as e:
raise e
if self.cov:
nb['cells'].pop(0)
with io.open(self.nbfile, 'wt') as f:
nbformat.write(nb, f)
self.assertTrue(True)
def create_notebooks_testsuite(**kwargs):
# gather information on notebooks
# all notebooks in the notebooks folder
# are considered as tests
# find notebook files in notebooks directory
cov = kwargs.pop('cov')
root_dir = os.getenv('WRADLIB_NOTEBOOKS', 'notebooks')
files = []
skip = []
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-6:] != '.ipynb':
continue
# skip checkpoints
if '/.' in root:
continue
f = os.path.join(root, filename)
files.append(f)
    # create one TestSuite per notebook to work around the test runner's
    # memory overconsumption on travis-ci
suites = []
for file in files:
suite = unittest.TestSuite()
suite.addTest(NotebookTest(file, cov))
suites.append(suite)
return suites
def create_doctest_testsuite():
# gather information on doctests, search in only wradlib folder
root_dir = 'wradlib/'
files = []
skip = ['__init__.py', 'version.py', 'bufr.py', 'test_']
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-3:] != '.py':
continue
if 'wradlib/tests' in root:
continue
f = os.path.join(root, filename)
f = f.replace('/', '.')
f = f[:-3]
files.append(f)
# put modules in doctest suite
suite = unittest.TestSuite()
for module in files:
suite.addTest(doctest.DocTestSuite(module))
return suite
def create_unittest_testsuite():
# gather information on tests (unittest etc)
root_dir = 'wradlib/tests/'
return unittest.defaultTestLoader.discover(root_dir)
def single_suite_process(queue, test, verbosity, **kwargs):
test_cov = kwargs.pop('coverage', 0)
test_nb = kwargs.pop('notebooks', 0)
if test_cov and not test_nb:
cov = coverage.coverage()
cov.start()
all_success = 1
for ts in test:
if ts.countTestCases() != 0:
res = unittest.TextTestRunner(verbosity=verbosity).run(ts)
all_success = all_success & res.wasSuccessful()
if test_cov and not test_nb:
cov.stop()
cov.save()
queue.put(all_success)
def keep_tests(suite, arg):
newsuite = unittest.TestSuite()
try:
for tc in suite:
try:
if tc.id().find(arg) != -1:
newsuite.addTest(tc)
except AttributeError:
new = keep_tests(tc, arg)
if new.countTestCases() != 0:
newsuite.addTest(new)
except TypeError:
pass
return newsuite
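# Sketch (not part of the original testrunner): keep_tests keeps only the tests
# whose id() contains the given substring. A FunctionTestCase id ends with the
# wrapped function's name, so the 'ex_' filter below keeps ex_demo only.
def _example_keep_tests():
    def ex_demo():
        pass
    def other():
        pass
    suite = unittest.TestSuite([unittest.FunctionTestCase(ex_demo),
                                unittest.FunctionTestCase(other)])
    filtered = keep_tests(suite, 'ex_')
    assert filtered.countTestCases() == 1
    return filtered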
def main():
args = sys.argv[1:]
usage_message = """Usage: python testrunner.py options arg
If run without options, testrunner displays the usage message.
If all tests suites should be run, use the -a option.
If arg is given, only tests containing arg are run.
options:
-a
--all
Run all tests (examples, test, doctest, notebooks)
-m
Run all tests within a single testsuite [default]
-M
Run each suite as separate instance
-e
--example
Run only examples tests
-d
--doc
Run only doctests
-u
--unit
            Run only unit tests
-n
--notebook
            Run only notebook tests
-s
--use-subprocess
Run every testsuite in a subprocess.
-c
--coverage
Run notebook tests with code coverage
-v level
Set the level of verbosity.
0 - Silent
                1 - Quiet (produces a dot for each successful test)
2 - Verbose (default - produces a line of output for each test)
-h
Display usage information.
"""
test_all = 0
test_examples = 0
test_docs = 0
test_notebooks = 0
test_units = 0
test_subprocess = 0
test_cov = 0
verbosity = VERBOSE
try:
options, arg = getopt.getopt(args, 'aednuschv:',
['all', 'example', 'doc',
'notebook', 'unit', 'use-subprocess',
'coverage', 'help'])
except getopt.GetoptError as e:
err_exit(e.msg)
if not options:
err_exit(usage_message)
for name, value in options:
if name in ('-a', '--all'):
test_all = 1
elif name in ('-e', '--example'):
test_examples = 1
elif name in ('-d', '--doc'):
test_docs = 1
elif name in ('-n', '--notebook'):
test_notebooks = 1
elif name in ('-u', '--unit'):
test_units = 1
elif name in ('-s', '--use-subprocess'):
test_subprocess = 1
elif name in ('-c', '--coverage'):
test_cov = 1
elif name in ('-h', '--help'):
err_exit(usage_message, 0)
elif name == '-v':
verbosity = int(value)
else:
err_exit(usage_message)
if not (test_all or test_examples or test_docs or
test_notebooks or test_units):
err_exit('must specify one of: -a -e -d -n -u')
testSuite = []
if test_all:
testSuite.append(create_examples_testsuite())
testSuite.append(create_notebooks_testsuite(cov=test_cov))
testSuite.append(create_doctest_testsuite())
testSuite.append(create_unittest_testsuite())
elif test_examples:
testSuite.append(create_examples_testsuite())
elif test_notebooks:
testSuite.append(create_notebooks_testsuite(cov=test_cov))
elif test_docs:
testSuite.append(unittest.TestSuite(create_doctest_testsuite()))
elif test_units:
testSuite.append(create_unittest_testsuite())
all_success = 1
if test_subprocess:
for test in testSuite:
if arg:
test = keep_tests(test, arg[0])
queue = Queue()
keywords = {'coverage': test_cov, 'notebooks': test_notebooks}
proc = Process(target=single_suite_process,
args=(queue, test, verbosity),
kwargs=keywords)
proc.start()
result = queue.get()
proc.join()
            # all_success stays 1 only while every suite so far has succeeded
            all_success = all_success & result
else:
if test_cov and not test_notebooks:
cov = coverage.coverage()
cov.start()
for ts in testSuite:
if arg:
ts = keep_tests(ts, arg[0])
for test in ts:
if test.countTestCases() != 0:
result = unittest.TextTestRunner(verbosity=verbosity).\
run(test)
                    # all_success stays 1 only while every test run so far has succeeded
                    all_success = all_success & result.wasSuccessful()
if test_cov and not test_notebooks:
cov.stop()
cov.save()
if all_success:
sys.exit(0)
else:
# This will return exit code 1
sys.exit("At least one test has failed. "
"Please see test report for details.")
def err_exit(message, rc=2):
sys.stderr.write("\n%s\n" % message)
sys.exit(rc)
if __name__ == '__main__':
main()
|
|
import numpy as np
import matplotlib.pyplot as plt
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
from traits.api import Instance, HasTraits, Int, Str, Float, List, Array, Bool, Tuple, Button, Dict, Enum, Range, on_trait_change
from traitsui.api import Handler, View, Item, UItem, CheckListEditor, HGroup, VGroup, Include
from pyface.api import FileDialog, OK
from pyface.qt import QtGui, QtCore
from scipy import ndimage
import matplotlib as mpl
from matplotlib.colors import LogNorm
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.widgets import RectangleSelector, SpanSelector
from DraggableResizableRectangle import DraggableResizeableRectangle, AnnotatedRectangle, AnnotatedLine, DraggableResizeableLine
import wx
if mpl.get_backend() == "Qt5Agg":
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
elif mpl.get_backend() == "Qt4Agg":
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
else:
print("TraitsMPLWidget: Could not find Qt4 oder Qt5. Don't know if I'm working.")
try:
import win32clipboard
print("Using win32clipboard")
except ImportError:
import pyperclip
print("Using Linux clipboard")
__author__ = 'd.wilson'
app = wx.App(False)
DISPLAY_SIZE = wx.GetDisplaySize()
DISPLAY_DPI = wx.ScreenDC().GetPPI()
class _ScrollableMPLFigureEditor(Editor):
scrollable = True
canvas = Instance(FigureCanvas)
toolbar = Instance(NavigationToolbar2QT)
def init(self, parent):
self.control = self._create_canvas(parent)
self.set_tooltip()
def update_editor(self):
pass
def _create_canvas(self, parent):
print(self.__class__.__name__, ": Creating canvas (_create_canvas)")
frame_canvas = QtGui.QWidget()
scrollarea = QtGui.QScrollArea()
mpl_canvas = FigureCanvas(self.value)
mpl_canvas.setParent(scrollarea)
scrollarea.setWidget(mpl_canvas)
mpl_toolbar = NavigationToolbar2QT(mpl_canvas, frame_canvas)
# mpl_toolbar.setIconSize(QtCore.QSize(30, 30)) # activate for smaller icon sizes
vbox = QtGui.QVBoxLayout()
vbox.addWidget(scrollarea)
vbox.addWidget(mpl_toolbar)
vbox.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
frame_canvas.setLayout(vbox)
return frame_canvas
class _MPLFigureEditor(Editor):
canvas = Instance(FigureCanvas)
toolbar = Instance(NavigationToolbar2QT)
def init(self, parent):
self.control = self._create_canvas(parent)
self.set_tooltip()
def update_editor(self):
pass
def _create_canvas(self, parent):
print(self.__class__.__name__, ": Creating canvas (_create_canvas)")
# matplotlib commands to create a canvas
frame = QtGui.QWidget()
mpl_canvas = FigureCanvas(self.value)
mpl_canvas.setParent(frame)
mpl_toolbar = NavigationToolbar2QT(mpl_canvas, frame)
# mpl_toolbar.setIconSize(QtCore.QSize(30, 30)) # activate for smaller icon sizes
vbox = QtGui.QVBoxLayout()
vbox.addWidget(mpl_canvas)
vbox.addWidget(mpl_toolbar)
frame.setLayout(vbox)
return frame
class MPLFigureEditor(BasicEditorFactory):
klass = _MPLFigureEditor
class ScrollableMPLFigureEditor(BasicEditorFactory):
klass = _ScrollableMPLFigureEditor
class MPLInitHandler(Handler):
"""Handler calls mpl_setup() to initialize mpl events"""
def init(self, info):
"""
This method gets called after the controls have all been
created but before they are displayed.
"""
# print("MPLInitHandler: info = ", info)
info.object.mpl_setup()
return True
class MinimalFigure(HasTraits):
figure_kwargs = Dict()
figure = Instance(Figure)
canvas = Instance(FigureCanvas)
clickdata = Tuple()
# some options - more to be added!
axes = List()
axes_selector = Enum(values='axes')
options_btn = Button('options')
title = Str()
xlabel = Str()
ylabel = Str()
fontsize = Range(0, 30, 12)
grid = Bool(False)
autoscale = Bool(True)
clear_btn = Button('clear')
lines_list = List()
line_selector = Enum(values='lines_list')
copy_data_btn = Button('copy data')
save_fig_btn = Button('save figure')
def __init__(self, *args, **kwargs):
# Figure kwargs: figsize=None, dpi=None, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, subplotpars=None, tight_layout=None
super(MinimalFigure, self).__init__()
self.figure_kwargs = kwargs
def _figure_default(self):
print(self.__class__.__name__, ": Create figure (_figure_default)")
fig = plt.figure(**self.figure_kwargs)
fig.patch.set_facecolor('w')
return fig
def update_axes(self):
print(self.__class__.__name__, ": Updating axes...")
self.axes = self.figure.get_axes()
def add_figure(self, fig):
print(self.__class__.__name__, ": Adding figure")
self.figure = fig
self.canvas = fig.canvas
self.add_trait('figure', fig)
self.add_trait_listener(fig)
self.mpl_setup()
self.update_axes()
    @on_trait_change('figure,axes[]')
def update_lines(self):
print(self.__class__.__name__, ": figure changed! ")
self.update_axes() # get axes
# get lines
lines = []
for ax in self.figure.get_axes():
for l in ax.get_lines():
tmplinename = self._replace_line2D_str(l)
if '_nolegend_' in tmplinename:
continue
lines.append(tmplinename)
self.lines_list = sorted(lines)
self._set_axes_property_variables() # get labels
if self.canvas:
self.canvas.draw()
def mpl_setup(self):
print(self.__class__.__name__, ": Running mpl_setup - connecting button press events")
self.canvas = self.figure.canvas # creates link (same object)
cid = self.figure.canvas.mpl_connect('button_press_event', self.__onclick)
def __onclick(self, event):
if event is None:
return None
self.clickdata = (event.button, event.x, event.y, event.xdata, event.ydata)
print(self.__class__.__name__, ": %s" % event)
def clear(self):
self._clear_btn_fired()
def _clear_btn_fired(self):
ax = self.figure.get_axes()
for a in ax:
print(self.__class__.__name__, ": Clearing axis ", a)
a.clear()
self.xlabel = ''
self.ylabel = ''
self.title = ''
for ax in self.figure.axes:
ax.grid(self.grid)
self.draw_canvas()
def _replace_line2D_str(self, s):
s = str(s)
return s.replace('Line2D(', '').replace(')', '')
def _is_line_in_axes(self, label, ax=None):
"""
:param label: Label of plot
:return: False if line is not in axes, line if line is in axes
"""
lines = [] # use update_lines()
if ax is None:
for ax in self.figure.get_axes():
for l in ax.get_lines():
tmplinename = self._replace_line2D_str(l)
if '_nolegend_' in tmplinename:
continue
lines.append(self._replace_line2D_str(l))
if label == self._replace_line2D_str(l):
return l
# self.lines_list = sorted(lines)
else:
for l in ax.get_lines():
if label == self._replace_line2D_str(l):
return l
return False
def _copy_data_btn_fired(self):
# due to https://github.com/matplotlib/matplotlib/issues/8458, only support for xy-error in Basicfigure()
print(self.__class__.__name__, ": Trying to copy data to clipboard")
line = self._is_line_in_axes(self.line_selector)
x = line.get_xdata()
y = line.get_ydata()
text = 'x \t y \n'
for i in range(len(x)):
text += str(x[i]) + "\t" + str(y[i]) + "\n"
self.add_to_clip_board(text)
def add_to_clip_board(self, text):
try:
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(text, win32clipboard.CF_UNICODETEXT)
win32clipboard.CloseClipboard()
        except Exception:
print("MinimalFigure: Could not copy to win32 clipboard. Trying linux version or install win32clipboard ")
try:
pyperclip.copy(text)
            except Exception:
print("MinimalFigure: Could not copy text for linux. Install pyperclip")
def get_axes(self):
self.axes = self.create_axis_if_no_axis()
return self.axes
@on_trait_change('figure')
def create_axis_if_no_axis(self):
# creates one axis if none created
axes = self.figure.get_axes()
if len(axes) == 0:
self.figure.add_subplot(111)
axes = self.figure.get_axes()
self.axes = axes
self._set_axes_property_variables()
return axes
@on_trait_change('figure')
def draw(self):
if self.autoscale:
axes = self.figure.get_axes()
for ax in axes:
ax.relim()
ax.autoscale()
self.draw_canvas()
def draw_canvas(self):
try:
self.canvas.draw_idle() # Queue redraw of the Agg buffer and request Qt paintEvent (Qt Case) -> SpeedUp
# self.canvas.draw()
except AttributeError:
print("MinimalFigure: Canvas is not ready.")
def _fontsize_changed(self):
mpl.rcParams.update({'font.size': self.fontsize})
self.draw_canvas()
def _grid_changed(self):
try:
self.axes_selector.grid(self.grid)
self.draw_canvas()
except AttributeError:
print("MinimalFigure: Axes not ready")
@on_trait_change('axes_selector')
def _set_axes_property_variables(self):
self.title = self.axes_selector.get_title()
self.xlabel = self.axes_selector.get_xlabel()
self.ylabel = self.axes_selector.get_ylabel()
self.axes_selector.grid(self.grid)
def _title_changed(self):
self.create_axis_if_no_axis()
self.axes_selector.set_title(self.title)
self.draw_canvas()
def _xlabel_changed(self):
self.create_axis_if_no_axis()
self.axes_selector.set_xlabel(self.xlabel)
self.draw_canvas()
def _ylabel_changed(self):
self.create_axis_if_no_axis()
self.axes_selector.set_ylabel(self.ylabel)
self.draw_canvas()
def _options_btn_fired(self):
self.edit_traits(view='traits_options_view')
def _save_fig_btn_fired(self):
dlg = FileDialog(action='save as')
if dlg.open() == OK:
self.savefig(dlg.directory + '/' + dlg.filename + ".png", dpi=300)
self.savefig(dlg.directory + '/' + dlg.filename + ".eps")
self.savefig(dlg.directory + '/' + dlg.filename + ".pdf")
# cPickle.dump(self, open("dlg.filename" + ".pkl", "wb"))
def savefig(self, *args, **kwargs):
self.figure.savefig(*args, **kwargs)
def options_group(self):
g = HGroup(
UItem('options_btn'),
UItem('clear_btn'),
UItem('line_selector'),
UItem('copy_data_btn'),
UItem('save_fig_btn'),
)
return g
def traits_view(self):
traits_view = View(
UItem('figure', editor=MPLFigureEditor(), style='custom'),
Include('options_group'),
handler=MPLInitHandler,
resizable=True,
scrollable=True,
)
return traits_view
def traits_scroll_view(self):
traits_view = View(
UItem('figure', editor=ScrollableMPLFigureEditor(), style='custom'),
Include('options_group'),
handler=MPLInitHandler,
resizable=True,
scrollable=True,
)
return traits_view
def traits_options_view(self):
traits_options_view = View(
Item('axes_selector'),
Item('title'),
Item('xlabel'),
Item('ylabel'),
Item('fontsize'),
HGroup(
Item('grid'),
Item('autoscale'),
),
Item('clickdata', style='readonly'),
resizable=True,
)
return traits_options_view
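# Hedged sketch (not part of the original module): building a MinimalFigure,
# plotting onto its managed figure and syncing the trait state. The call that
# would open the TraitsUI window is left commented out so this stays importable
# without running a GUI event loop.
def _example_minimal_figure():
    mf = MinimalFigure(figsize=(5, 3))
    ax = mf.get_axes()[0]
    ax.plot([0, 1, 2], [0, 1, 4], label='demo curve')
    mf.update_lines()              # refresh lines_list and axes-derived traits
    mf.xlabel = 'x'
    mf.ylabel = 'y'
    # mf.configure_traits(view='traits_view')  # would open the editor window
    return mf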
class BasicFigure(MinimalFigure):
mask_data_bool = Bool(True)
mask_length = Int(100000)
normalize_bool = Bool(False)
normalize_max = Float()
normalize_maxes = List()
log_bool = Bool(False)
draw_legend_bool = Bool(True)
# image stuff
origin = Str('lower')
img_bool = Bool(False)
img_max = Float(1.)
img_data = Array()
img_kwargs = Dict
zlabel = Str()
vmin_lv, vmin_hv, vmax_lv, vmax_hv = Float(0.), Float(0.), Float(1.), Float(1.)
vmin = Range('vmin_lv', 'vmin_hv')
vmax = Range('vmax_lv', 'vmax_hv')
cmaps = List
cmap_selector = Enum(values='cmaps')
image_slider_btn = Button('z-slider')
    errorbar_data = Dict()  # this dict is needed because of https://github.com/matplotlib/matplotlib/issues/8458
_xerr = Dict()
_yerr = Dict()
def __init__(self, **kwargs):
super(BasicFigure, self).__init__(**kwargs)
self.grid = True
def _test_plot_kwargs(self, kwargs):
if 'fmt' in kwargs:
fmt = kwargs['fmt']
del kwargs['fmt']
else:
fmt = ''
if 'label' not in kwargs:
raise Exception("BasicFigure: Please provide a label for datapoints.")
else:
label = kwargs['label']
return fmt, label
def _mask_data(self, data):
        # subsampling keeps plotting responsive even for huge inputs (e.g. 70M points, which matplotlib could never draw directly)
if not self.mask_data_bool:
return data
else:
data = np.array(data)
steps = len(data) / self.mask_length
masked_data = data[0:-1:int(steps)]
return masked_data
def _zlabel_changed(self):
if self.img_bool:
self.cb.set_label(self.zlabel)
self.draw()
def _cmap_selector_changed(self):
if self.img_bool:
self.img.set_cmap(self.cmap_selector)
self.draw()
def _cmaps_default(self):
print(self.__class__.__name__, ": Initiating colormaps")
cmaps = sorted(m for m in mpl._cm.datad)
return cmaps
def _normalize_bool_changed(self, old=None, new=None):
        # This function is a little long since it handles normalization completely by itself.
        # Maybe there is a better way, but it's working and I do not have time to think about a better one.
if old != new and self.img_bool is False:
self.set_animation_for_lines(False)
self.normalize_max = 0.
if self.img_bool:
if self.normalize_bool:
self.img_max = np.nanmax(self.img_data)
self.img_data = self.img_data / self.img_max
else:
self.img_data = self.img_data * self.img_max
self.update_imshow(self.img_data)
else:
if self.normalize_bool:
self.normalize_maxes = []
line = None
for l in self.lines_list:
line = self._is_line_in_axes(l)
if line is False:
continue
x, y = line.get_data()
max = np.nanmax(y)
self.normalize_maxes.append(max)
if self.normalize_max < max:
self.normalize_max = max
for l in self.lines_list:
line = self._is_line_in_axes(l)
if line is False:
continue
x, y = line.get_data()
line.set_data(x, y / self.normalize_max)
if not line.get_animated():
self.draw()
else:
line = None
if len(self.normalize_maxes) > 0:
for i, l in enumerate(self.lines_list):
line = self._is_line_in_axes(l, self.axes_selector)
if line is False:
continue
x, y = line.get_data()
max = np.nanmax(y)
if old != new:
line.set_data(x, y / max * self.normalize_maxes[i])
else:
line.set_data(x, y)
if line is not None and line is not False:
if not line.get_animated():
self.draw()
def draw(self):
if self.autoscale and not self.img_bool:
axes = self.figure.get_axes()
for ax in axes:
ax.relim()
ax.autoscale()
# ax.autoscale_view(True,True,True)
self.draw_canvas()
def _img_bool_changed(self, val):
self.figure.clear()
if val:
self.grid = False
else:
self.grid = True
self.create_axis_if_no_axis()
def _log_bool_changed(self):
if self.img_bool:
self.clear()
if not self.log_bool:
self.img_kwargs.pop('norm')
self.imshow(self.img_data, **self.img_kwargs)
else:
self.set_animation_for_lines(False) # has to be done, otherwise no datapoints
if self.log_bool: # TODO: Maybe add xscale log, but not needed now.
# self.axes_selector.set_xscale("log", nonposx='clip')
self.axes_selector.set_yscale("log", nonposy='clip')
else:
self.axes_selector.set_yscale("linear")
self.draw()
def _image_slider_btn_fired(self):
self.autoscale = False
self.edit_traits(view='image_slider_view')
def _clear_btn_fired(self):
if self.img_bool:
self.img_bool = False # also triggers
else:
ax = self.figure.get_axes()
for a in ax:
print("MinimalFigure: Clearing axis ", a)
a.clear()
self.xlabel = ''
self.ylabel = ''
self.title = ''
for ax in self.figure.axes:
ax.grid(self.grid)
self.errorbar_data = {}
self.draw_canvas()
def imshow(self, z, ax=0, **kwargs):
if self.normalize_bool:
self._normalize_bool_changed()
return
if self.log_bool:
kwargs['norm'] = LogNorm()
if np.any(z < 0.):
print(self.__class__.__name__, ": WARNING - All values below 0. has been set to 0.")
z[np.where(z < 0.)] = 0.
self.img_data = np.array(z)
if 'label' in kwargs:
self.label = kwargs.pop('label')
if 'origin' in kwargs:
self.origin = kwargs['origin']
if 'aspect' in kwargs:
aspect = kwargs.pop('aspect')
else:
aspect = 'auto'
if not self.img_bool:
self.img_bool = True
self.img = self.axes_selector.imshow(self.img_data, aspect=aspect, **kwargs)
if not hasattr(self, "label"):
self.label = ''
self.cb = self.figure.colorbar(self.img, label=self.label)
self.draw()
else:
self.update_imshow(self.img_data, ax=ax)
if 'extent' in kwargs.keys():
self.img.set_extent(kwargs['extent'])
assert type(self.img) == mpl.image.AxesImage
self._set_cb_slider()
self.img_kwargs = kwargs
def update_imshow(self, z, ax=0):
z = np.array(z)
self.img.set_data(z)
if self.autoscale:
self.img.autoscale()
self.draw()
@on_trait_change('autoscale')
def _set_cb_slider(self):
if self.autoscale and self.img_bool:
minv, maxv = float(np.nanmin(self.img_data).round(2)), float(np.nanmax(self.img_data).round(2))
self.vmin_lv = minv
self.vmin_hv = maxv
self.vmax_lv = minv
self.vmax_hv = maxv
self.vmin = self.vmin_lv
self.vmax = self.vmax_hv
def _vmin_changed(self):
vmin = self.vmin
if self.log_bool:
if self.vmin < 0.:
vmin = 0.
if not self.autoscale:
self.img.set_clim(vmin=vmin, vmax=self.vmax)
self.draw()
def _vmax_changed(self):
vmin = self.vmin
if self.log_bool and self.vmin < 0.:
vmin = 0.
if not self.autoscale:
self.img.set_clim(vmin=vmin, vmax=self.vmax)
self.draw()
def axvline(self, pos, ax=0, **kwargs):
self.ax_line(pos, 'axvline', ax=ax, **kwargs)
def axhline(self, pos, ax=0, **kwargs):
self.ax_line(pos, 'axhline', ax=ax, **kwargs)
def ax_line(self, pos, func_str, ax=0, **kwargs):
# self.img_bool = False
fmt, label = self._test_plot_kwargs(kwargs)
axes = self.figure.get_axes()
line = self._is_line_in_axes(label)
nodraw = False
if 'nodraw' in kwargs:
if kwargs.pop('nodraw'):
nodraw = True
if not line:
print("BasicFigure: Plotting axhline ", label)
if type(ax) == int:
line = getattr(axes[ax],func_str)(pos, **kwargs)
elif hasattr(ax, func_str):
line = getattr(ax, func_str)(pos, **kwargs)
else:
raise TypeError('ax can be an int or the axis itself!')
self.lines_list.append(label)
else:
line.remove()
if type(ax) == int:
line = getattr(axes[ax], func_str)(pos, **kwargs)
elif hasattr(ax, func_str):
line = getattr(ax, func_str)(pos, **kwargs)
else:
raise TypeError('ax can be an int or the axis itself!')
self.lines_list.append(label)
self.draw_legend()
if not nodraw:
self._normalize_bool_changed()
self.draw() # draws with respect to autolim etc.
if hasattr(line, "append"):
return line[0]
else:
return line
def _is_errorbar_plotted(self, label):
if label in self.errorbar_data:
return self.errorbar_data[label]
else:
return False
def errorbar(self, x, y, ax=0, **kwargs):
""" Additional (to normal matplotlib plot method) kwargs:
- (bool) nodraw If True, will not draw canvas
- (str) fmt like in matplotlib errorbar(), but it is stupid to use it only in one function
"""
self.img_bool = False
fmt, label = self._test_plot_kwargs(kwargs)
axes = self.get_axes()
line = self._is_errorbar_plotted(label)
if len(x) == 0:
print(self.__class__.__name__, "Length of x array is 0.")
return
        if 'xerr' not in kwargs:
            kwargs['xerr'] = np.zeros(np.shape(x))
        if 'yerr' not in kwargs:
            kwargs['yerr'] = np.zeros(np.shape(y))
self._xerr[label] = kwargs['xerr']
self._yerr[label] = kwargs['yerr']
if len(x) > self.mask_length:
x = self._mask_data(x)
y = self._mask_data(y)
kwargs['xerr'] = self._mask_data(kwargs.pop('xerr'))
kwargs['yerr'] = self._mask_data(kwargs.pop('yerr'))
nodraw = False
if 'nodraw' in kwargs:
if kwargs.pop('nodraw'):
nodraw = True
if type(line) is bool:
print("BasicFigure: Plotting ", label)
if type(ax) == int:
self.errorbar_data[label] = axes[ax].errorbar(x, y, fmt=fmt, **kwargs)
elif hasattr(ax, 'plot'):
self.errorbar_data[label] = ax.plot(x, y, fmt=fmt, **kwargs)
else:
raise TypeError('ax can be an int or the axis itself!')
self.lines_list.append(label)
self.draw_legend()
else:
if line[0].get_animated():
self.set_animation_for_lines(False) # doesn't work otherwise, dunno why.
self._set_errorbar_data(x, y, **kwargs)
if not nodraw:
self._normalize_bool_changed()
self.draw() # draws with respect to autolim etc.
if hasattr(line, "append"):
return line[0]
else:
return line
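    # A minimal usage sketch (illustrative only; assumes `fig` is an instance of
    # this class and that a 'label' kwarg identifies the errorbar container, as
    # in the method above):
    #
    #     fig.errorbar(x, y, yerr=dy, label='measurement', fmt='o')
    #
    # Calling errorbar() again with the same label updates the stored container
    # via _set_errorbar_data() instead of adding a new artist.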
def _copy_data_btn_fired(self):
print(self.__class__.__name__, ": Trying to copy data to clipboard")
if self.line_selector in self.errorbar_data:
line, caplines, barlinecols = self.errorbar_data[self.line_selector]
x = line.get_xdata()
y = line.get_ydata()
xerr = self._xerr[self.line_selector]
yerr = self._yerr[self.line_selector]
print("xerr = ", xerr)
text = 'x \t y \t x_error \t y_error \n'
for i in range(len(x)):
text += str(x[i]) + "\t" + str(y[i]) + "\t" + str(xerr[i]) + "\t" + str(
yerr[i]) + "\n"
else:
line = self._is_line_in_axes(self.line_selector)
x = line.get_xdata()
y = line.get_ydata()
text = 'x \t y \n'
for i in range(len(x)):
text += str(x[i]) + "\t" + str(y[i]) + "\n"
self.add_to_clip_board(text)
def _set_errorbar_data(self, *args, **kwargs):
x, y = args
label = kwargs['label']
x = np.array(x)
y = np.array(y)
line, caplines, barlinecols = self.errorbar_data[label]
line.set_data(x, y)
xerr = kwargs['xerr']
yerr = kwargs['yerr']
if not (xerr is None and yerr is None):
error_positions = (x - xerr, y), (x + xerr, y), (x, y - yerr), (x, y + yerr)
# Update the caplines
if len(caplines) > 0:
for i, pos in enumerate(error_positions):
caplines[i].set_data(pos)
# Update the error bars
barlinecols[0].set_segments(zip(zip(x - xerr, y), zip(x + xerr, y)))
barlinecols[1].set_segments(zip(zip(x, y - yerr), zip(x, y + yerr)))
def plot(self, x, y=None, ax=0, **kwargs):
""" Additional (to normal matplotlib plot method) kwargs:
- (bool) nodraw If True, will not draw canvas
- (str) fmt like in matplotlib errorbar(), but it is stupid to use it only in one function
"""
self.img_bool = False
fmt, label = self._test_plot_kwargs(kwargs)
axes = self.get_axes()
line = self._is_line_in_axes(label)
if len(x) == 0:
print(self.__class__.__name__, "Length of x array is 0.")
return
if y is None:
y = x
x = np.linspace(1,np.shape(y)[0]+1,num =np.shape(y)[0])
if len(x) > self.mask_length:
x = self._mask_data(x)
y = self._mask_data(y)
nodraw = False
if 'nodraw' in kwargs:
if kwargs.pop('nodraw'):
nodraw = True
if type(line) is bool:
print(self.__class__.__name__, ": Plotting ", label)
if type(ax) == int:
line = axes[ax].plot(x, y, fmt, **kwargs)
elif hasattr(ax, 'plot'):
line = ax.plot(x, y, fmt, **kwargs)
else:
raise TypeError('ax can be an int or the axis itself!')
self.lines_list.append(label)
self.draw_legend()
else:
if line.get_animated():
self.set_animation_for_lines(False) # doesn't work otherwise, dunno why.
line.set_data(x,y)
if not nodraw:
self._normalize_bool_changed()
self.draw() # draws with respect to autolim etc.
# self.start_thread('draw()') # kind of working ...
if hasattr(line, "append"):
return line[0]
else:
return line
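    # A minimal usage sketch (illustrative only; `fig` is an instance of this
    # class, and the 'label' kwarg is what _is_line_in_axes() uses to find an
    # existing line):
    #
    #     fig.plot(x, np.sin(x), label='sin', fmt='-')
    #     fig.plot(x, np.sin(2 * x), label='sin')   # updates the existing line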
def blit(self, x, y, ax=0, **kwargs):
kwargs['animated'] = True
self.img_bool = False
fmt, label = self._test_plot_kwargs(kwargs)
axes = self.get_axes()
line = self._is_line_in_axes(label)
assert len(x) > 0, "BasicFigure: Length of x array is 0"
if len(x) > self.mask_length:
x = self._mask_data(x)
y = self._mask_data(y)
nodraw = False
if 'nodraw' in kwargs:
if kwargs.pop('nodraw'):
nodraw = True
if not self._is_line_in_axes(label):
print(self.__class__.__name__, ": Plotting blitted ", label)
axes[ax].plot(x, y, fmt, **kwargs)
self.lines_list.append(label)
self.draw_legend()
self.figure.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.axes_selector.bbox)
self.refresh_lines(ax)
else:
l = self._is_line_in_axes(label)
if not l.get_animated():
self.set_animation_for_lines(True)
self.blit(x, y, ax=0, **kwargs)
self.canvas.restore_region(self.background)
self._setlinedata(x, y , ax, **kwargs)
self.refresh_lines(ax)
self.canvas.blit(self.axes_selector.bbox)
self._normalize_bool_changed()
def _setlinedata(self, x, y, ax, **kwargs):
x = np.array(x)
y = np.array(y)
l = self._is_line_in_axes(kwargs['label'])
l.set_data(x,y)
def mpl_setup(self):
print(self.__class__.__name__, ": Running mpl_setup - connecting button press events")
self.canvas = self.figure.canvas # creates link (same object)
cid = self.figure.canvas.mpl_connect('button_press_event', self.__onclick)
def __onclick(self, event):
if event is None:
return None
self.clickdata = (event.button, event.x, event.y, event.xdata, event.ydata)
if not self.img_bool:
self.set_animation_for_lines(False)
print(self.__class__.__name__, ": %s" % event)
def set_animation_for_lines(self, TF):
self.animated = TF
axes = self.get_axes()
for ax in axes:
for l in ax.get_lines():
l.set_animated(TF)
ax.relim()
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.axes_selector.bbox)
def refresh_lines(self, ax):
axes = self.get_axes()
for line in axes[ax].get_lines():
axes[ax].draw_artist(line)
self.canvas.update()
def draw_legend(self, ax=None):
if self.draw_legend_bool:
print(self.__class__.__name__, ": Drawing Legend")
axes = self.figure.get_axes()
            if ax is None:
for ax in axes:
leg = ax.legend(loc=0, fancybox=True)
else:
axes[ax].legend(loc=0, fancybox=True)
def options_group(self):
g = HGroup(
UItem('options_btn'),
UItem('clear_btn'),
UItem('line_selector', visible_when='not img_bool'),
UItem('copy_data_btn', visible_when='not img_bool'),
Item('normalize_bool', label='normalize'),
Item('log_bool', label='log scale'),
Item('draw_legend_bool', label='draw legend'),
Item('cmap_selector', label='cmap', visible_when='img_bool'),
UItem('image_slider_btn', visible_when='img_bool'),
UItem('save_fig_btn'),
)
return g
def options_group_axes_sel(self):
g = HGroup(
UItem('options_btn'),
UItem('clear_btn'),
UItem('line_selector', visible_when='not img_bool'),
UItem('copy_data_btn', visible_when='not img_bool'),
Item('axes_selector'),
Item('normalize_bool', label='normalize'),
Item('log_bool', label='log scale'),
Item('draw_legend_bool', label='draw legend'),
Item('cmap_selector', label='cmap', visible_when='img_bool'),
UItem('image_slider_btn', visible_when='img_bool'),
UItem('save_fig_btn'),
)
return g
def traits_view(self):
trait_view = View(
UItem('figure', editor=MPLFigureEditor(), style='custom'),
Include('options_group'),
handler=MPLInitHandler,
resizable=True,
)
return trait_view
def traits_scroll_view(self):
traits_scroll_view = View(
UItem('figure', editor=ScrollableMPLFigureEditor(), style='custom'),
Include('options_group'),
handler=MPLInitHandler,
resizable=True,
# scrollable=True,
)
return traits_scroll_view
def traits_multiple_axes_view(self):
traits_scroll_view = View(
UItem('figure', editor=MPLFigureEditor(), style='custom'),
Include('options_group_axes_sel'),
handler=MPLInitHandler,
resizable=True,
)
return traits_scroll_view
def image_slider_view(self):
g = View(
VGroup(
Item('vmin', label='min', style='custom', visible_when='img_bool'),
Item('vmax', label='max', style='custom', visible_when='img_bool'),
Item('autoscale'),
),
resizable=True,
)
return g
def traits_options_view(self):
traits_options_view = View(
Item('axes_selector'),
Item('title'),
Item('xlabel'),
Item('ylabel'),
Item('zlabel'),
Item('fontsize'),
HGroup(
Item('grid'),
Item('autoscale'),
Item('mask_data_bool', label='mask data', visible_when='not img_bool'),
Item('mask_length', width=-50, visible_when='not img_bool'),
),
Item('clickdata', style='readonly'),
resizable=True,
)
return traits_options_view
class WidgetFigure(BasicFigure):
nColorsFromColormap = Int(5)
unlock_all_btn = Button('(Un-) Lock')
widget_list = List()
widget_sel = Enum(values='widget_list')
widget_clear_btn = Button('Clear Current Widgets')
drawn_patches = List()
drawn_patches_names = List()
patch_data = List # [patch no, 2D Arr]
drawn_lines = List()
drawn_lines_names= List()
drawn_lines_selector = Enum(values='drawn_lines_names')
line_width = Range(0, 1000, 0, tooltip='average over number of lines in both directions.')
line_interpolation_order = Range(0, 5, 1)
line_data = List # [line no, xvals, yvals]
def _widget_list_default(self):
w = list()
w.append('Line Selector')
w.append('Rectangle Selector')
return w
def _widget_sel_default(self):
w = 'Line Selector'
return w
def _widget_clear_btn_fired(self):
if self.widget_sel == self.widget_list[0]:
self.clear_lines()
if self.widget_sel == self.widget_list[1]:
self.clear_patches()
@on_trait_change('widget_sel')
def set_selector(self,widget):
if widget == self.widget_list[0]:
self._line_selector()
if widget == self.widget_list[1]:
self._rectangle_selector()
def act_all(self):
for i in self.drawn_patches: i.connect()
for i in self.drawn_lines: i.connect()
@on_trait_change('drawn_lines:lineReleased, drawn_lines:lineUpdated, line_width, line_interpolation_order, img_data[]')
def get_line_data(self):
line_data = []
for line in self.drawn_lines_names:
x, y = self.get_widget_line(line).line.get_data()
len_x = abs(x[1] - x[0])
len_y = abs(y[1] - y[0])
            len_line = int(round(np.sqrt(len_x ** 2 + len_y ** 2)))
            x_float = np.linspace(x[0], x[1], len_line)
            y_float = np.linspace(y[0], y[1], len_line)
            x, y = x_float.astype(int), y_float.astype(int)
data = []
for i in range(-self.line_width, self.line_width + 1):
n1, n2 = self.get_normal(x[0], x[1], y[0], y[1])
n1 = int(n1)
n2 = int(n2)
zi = ndimage.map_coordinates(self.img_data, np.vstack((y_float+n2*i, x_float+n1*i)),
order=self.line_interpolation_order)
data.append(zi)
line_cut_mean = np.mean(data, axis=0)
xvals = np.arange(0, line_cut_mean.shape[0], 1)
line_data.append(np.array([xvals, line_cut_mean]))
self.line_data = line_data
@staticmethod
def get_normal(x1, x2, y1, y2):
"""
calculates the normalized normal vector to the straight line defined by x1, x2, y1, y2
thx Denis!
:param x1: float
:param x2: float
:param y1: float
:param y2: float
:return: normalized normal vector with one component = 1 and one component < 1
"""
delta_x = float(x1-x2)
delta_y = float(y1-y2)
if delta_y != 0:
n1 = 1.0
n2 = -delta_x/delta_y
if abs(n2) > 1.0:
n1 = n1/n2
n2 = 1.0
else:
n1 = 0.0
n2 = 1.0
return n1, n2
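    # Worked examples for get_normal (values follow directly from the code):
    #   get_normal(0, 1, 0, 2)  -> (1.0, -0.5)   since (1, -0.5) . (1, 2) == 0
    #   get_normal(0, 5, 3, 3)  -> (0.0, 1.0)    horizontal line, vertical normal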
@on_trait_change('line_width')
def _update_line_width(self):
        if self.drawn_lines_selector is None:
            line = 'line 0'
        else:
            line = self.drawn_lines_selector
self.get_widget_line(line).drl.line.set_linewidth(2 * self.line_width + 1) # how to avoid this?!
self.get_widget_line(line).drl.line.set_alpha((np.exp(-self.line_width / 20))) # exponentially decreasing alpha
self.draw()
def _line_selector(self):
try:
self.rs.disconnect_events()
DraggableResizeableRectangle.lock = True
print('Rectangles are locked')
        except Exception:
print('Rectangles could not be locked')
print(self.__class__.__name__, ": Connecting Line Selector")
DraggableResizeableLine.lock = None
self.ls = RectangleSelector(self.axes_selector, self.line_selector_func, drawtype='line', useblit=True, button=[3])
def line_selector_func(self, eclick, erelease, cmap=mpl.cm.jet):
print(self.__class__.__name__, "Line Selector:")
print(self.__class__.__name__, "eclick: {} \n erelease: {}".format(eclick, erelease))
print()
x0, y0 = eclick.xdata, eclick.ydata
x1, y1 = erelease.xdata, erelease.ydata
cNorm = mpl.colors.Normalize(vmin=0, vmax=self.nColorsFromColormap)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmap)
color = scalarMap.to_rgba(len(self.drawn_lines) + 1)
text = 'line ' + str(len(self.drawn_lines))
line = AnnotatedLine(self.axes_selector,x0, y0, x1, y1, text=text, color=color)
self.drawn_lines_names.append(line.text)
self.drawn_lines.append(line)
self.canvas.draw()
def get_widget_line(self, line_name):
line_handle = None
for i, line in enumerate(self.drawn_lines):
if line.text == line_name:
line_handle = line
break
return line_handle
def clear_lines(self):
print(self.__class__.__name__, ": Clearing selection lines")
if len(self.drawn_lines) != 0:
print(self.__class__.__name__, ": Clearing selection lines")
for l in self.drawn_lines:
try:
l.remove()
except ValueError:
print(self.__class__.__name__, ": Line was not found.")
self.drawn_lines = []
self.drawn_lines_names = []
self.canvas.draw()
@on_trait_change('drawn_patches:rectUpdated')
def calculate_picture_region_sum(self):
data = []
for p in self.drawn_patches:
x1, y1 = p.rectangle.get_xy()
x2 = x1 + p.rectangle.get_width()
y2 = y1 + p.rectangle.get_height()
if p.rectangle.get_width() < 0:
x2, x1 = x1, x2
if p.rectangle.get_height() < 0:
y2, y1 = y1, y2
if p.rectangle.get_width() == 0 or p.rectangle.get_height() == 0:
print('Zero Patch dimension')
# data & extent
data.append([self.img_data[int(y1):int(y2),int(x1):int(x2)], [int(x1), int(x2), int(y1), int(y2)]])
self.patch_data = data
def _rectangle_selector(self):
try:
self.ls.disconnect_events()
DraggableResizeableLine.lock = True
print('Line Selector is locked')
        except Exception:
print('Line Selector could not be locked')
DraggableResizeableRectangle.lock = None
print(self.__class__.__name__, ": Connecting Rectangle Selector")
self.rs = RectangleSelector(self.axes_selector, self.rectangle_selector_func, drawtype='box', useblit=True, button=[3])
def rectangle_selector_func(self, eclick, erelease, cmap=mpl.cm.jet):
"""
Usage:
@on_trait_change('fig:selectionPatches:rectUpdated')
function name:
for p in self.fig.selectionPatches:
do p
"""
print(self.__class__.__name__, "Rectangle Selector:")
print(self.__class__.__name__, "eclick: {} \n erelease: {}".format(eclick, erelease))
print()
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
cNorm = mpl.colors.Normalize(vmin=0, vmax=self.nColorsFromColormap)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmap)
color = scalarMap.to_rgba(len(self.drawn_patches) + 1)
self.an_rect = AnnotatedRectangle(self.axes_selector, x1, y1, x2, y2, 'region ' + str(len(self.drawn_patches)), color=color)
self.drawn_patches_names.append(self.an_rect.text)
self.drawn_patches.append(self.an_rect)
self.canvas.draw()
def get_widget_patch(self, patch_name):
patch = None
for i, rect in enumerate(self.drawn_patches):
if rect.text == patch_name:
patch = rect
break
return patch
def clear_patches(self):
if len(self.drawn_patches) != 0:
print(self.__class__.__name__, ": Clearing selection patches")
for p in self.drawn_patches:
try:
p.remove()
except ValueError:
print(self.__class__.__name__, ": Patch was not found.")
DraggableResizeableLine.reset_borders()
DraggableResizeableRectangle.reset_borders()
self.drawn_patches = []
self.canvas.draw()
def clear_widgets(self):
self.clear_patches()
self.clear_lines()
def options_group(self):
g = HGroup(
VGroup(
HGroup(
UItem('options_btn'),
UItem('clear_btn'),
UItem('line_selector', visible_when='not img_bool'),
UItem('copy_data_btn', visible_when='not img_bool'),
Item('normalize_bool', label='normalize'),
Item('log_bool', label='log scale'),
Item('cmap_selector', label='cmap', visible_when='img_bool'),
UItem('image_slider_btn', visible_when='img_bool'),
UItem('save_fig_btn'),
label='Basic'
),
HGroup(
UItem('widget_sel'),
UItem('widget_clear_btn'),
Item('drawn_lines_selector', label='drawn lines', tooltip='select here line for line property (e.g. width) changes'),
Item('line_width', tooltip='average over number of lines in both directions.'),
Item('line_interpolation_order'),
label='Widgets'
),
layout='tabbed',
),
)
return g
class BlittedFigure(BasicFigure):
def plot(self, *args, **kwargs):
self.blit(*args, **kwargs)
if __name__ == '__main__':
# minimal_figure = MinimalFigure(figsize=(6 * 1.618, 6), facecolor='w', tight_layout=True)
# minimal_figure.configure_traits(view='traits_view')
# basic_figure = BasicFigure(figsize=(5 * 1.618, 5), facecolor='w', tight_layout=True)
# basic_figure.configure_traits()
# basic_figure.configure_traits(view='traits_multiple_axes_view')
# basic_figure.configure_traits(view='traits_scroll_view')
minimal_figure = WidgetFigure(figsize=(6, 6), facecolor='w', tight_layout=True)
minimal_figure.configure_traits(view='traits_view')
|
|
from django.conf import settings
from django.utils import translation
from elasticsearch_dsl import F, query
from elasticsearch_dsl.filter import Bool
from rest_framework.filters import BaseFilterBackend
import mkt
from mkt.api.base import form_errors, get_region_from_request
from mkt.constants.applications import get_device_id
from mkt.features.utils import get_feature_profile
class SearchQueryFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that scores the given ES queryset
with a should query based on the search query found in the current
request's query parameters.
"""
def _get_locale_analyzer(self, lang):
analyzer = mkt.SEARCH_LANGUAGE_TO_ANALYZER.get(lang)
if (analyzer in mkt.SEARCH_ANALYZER_PLUGINS and
not settings.ES_USE_PLUGINS):
analyzer = None
return analyzer
def filter_queryset(self, request, queryset, view):
q = request.GET.get('q', '').lower()
lang = translation.get_language()
analyzer = self._get_locale_analyzer(lang)
if not q:
return queryset
should = []
rules = [
(query.Match, {'query': q, 'boost': 3, 'analyzer': 'standard'}),
(query.Match, {'query': q, 'boost': 4, 'type': 'phrase',
'slop': 1}),
(query.Prefix, {'value': q, 'boost': 1.5}),
]
# Only add fuzzy queries if q is a single word. It doesn't make sense
# to do a fuzzy query for multi-word queries.
if ' ' not in q:
rules.append(
(query.Fuzzy, {'value': q, 'boost': 2, 'prefix_length': 1}))
        # Apply the rules to a few base fields. Some might not be present in
        # every document type / index.
for k, v in rules:
for field in ('name', 'short_name', 'title', 'app_slug', 'author',
'url_tokenized'):
should.append(k(**{field: v}))
# Exact matches need to be queried against a non-analyzed field. Let's
# do a term query on `name.raw` for an exact match against the item
# name and give it a good boost since this is likely what the user
# wants.
should.append(query.Term(**{'name.raw': {'value': q, 'boost': 10}}))
# Do the same for GUID searches.
should.append(query.Term(**{'guid': {'value': q, 'boost': 10}}))
# If query is numeric, check if it is an ID.
if q.isnumeric():
should.append(query.Term(**{'id': {'value': q, 'boost': 10}}))
if analyzer:
should.append(
query.Match(**{'name_l10n_%s' % analyzer: {'query': q,
'boost': 2.5}}))
should.append(
query.Match(**{'short_name_l10n_%s' % analyzer: {
'query': q,
'boost': 2.5}}))
# Add searches on the description field.
should.append(
query.Match(description={'query': q, 'boost': 0.8,
'type': 'phrase'}))
if analyzer:
desc_field = 'description_l10n_%s' % analyzer
desc_analyzer = ('%s_analyzer' % analyzer
if analyzer in mkt.STEMMER_MAP else analyzer)
should.append(
query.Match(
**{desc_field: {'query': q, 'boost': 0.6, 'type': 'phrase',
'analyzer': desc_analyzer}}))
# Add searches on tag field.
should.append(query.Term(tags={'value': q}))
if ' ' not in q:
should.append(query.Fuzzy(tags={'value': q, 'prefix_length': 1}))
# The list of functions applied to our `function_score` query.
functions = [
query.SF('field_value_factor', field='boost'),
]
# Add a boost for the preferred region, if it exists.
region = get_region_from_request(request)
if region:
functions.append({
'filter': {'term': {'preferred_regions': region.id}},
# TODO: When we upgrade to Elasticsearch 1.4, change this
# to 'weight'.
'boost_factor': 4,
})
return queryset.query('function_score',
query=query.Bool(should=should),
functions=functions)
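# A minimal wiring sketch (hypothetical view; assumes the standard
# django-rest-framework `filter_backends` hook and an elasticsearch-dsl Search
# object as the view's queryset). Backends run in order, each returning a new
# Search object:
#
#     class AppSearchView(ListAPIView):
#         filter_backends = [SearchQueryFilter, PublicAppsFilter,
#                            RegionFilter, SortingFilter]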
class SearchFormFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters the given queryset
based on `self.form_class`.
"""
# A mapping of form fields to Elasticsearch fields for those that differ.
FORM_TO_FIELD_MAP = {
'author': 'author.raw',
'cat': 'category',
'has_info_request': 'latest_version.has_info_request',
'has_editor_comment': 'latest_version.has_editor_comment',
'languages': 'supported_locales',
'offline': 'is_offline',
'premium_types': 'premium_type',
'tag': 'tags'
}
def filter_queryset(self, request, queryset, view):
form = view.form_class(request.GET)
if not form.is_valid():
raise form_errors(form)
self.form_data = form.cleaned_data
data = {}
for k, v in self.form_data.items():
data[self.FORM_TO_FIELD_MAP.get(k, k)] = v
# Must filters.
must = []
for field in self.VALID_FILTERS:
value = data.get(field)
if value is not None:
if type(value) == list:
filter_type = 'terms'
else:
filter_type = 'term'
must.append(F(filter_type, **{field: value}))
if must:
return queryset.filter(Bool(must=must))
return queryset
class PublicSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['app_type', 'author.raw', 'category', 'device', 'guid',
'installs_allowed_from', 'is_offline', 'manifest_url',
'premium_type', 'supported_locales', 'tags']
class ReviewerSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['app_type', 'author.raw', 'category', 'device',
'latest_version.has_editor_comment',
'latest_version.has_info_request',
'latest_version.status',
'installs_allowed_from', 'is_escalated', 'is_offline',
'manifest_url', 'premium_type', 'status',
'supported_locales', 'tags']
def filter_queryset(self, request, queryset, view):
queryset = super(ReviewerSearchFormFilter,
self).filter_queryset(request, queryset, view)
# Special case for `is_tarako`, which gets converted to a tag filter.
is_tarako = self.form_data.get('is_tarako')
if is_tarako is not None:
if is_tarako:
queryset = queryset.filter(
Bool(must=[F('term', tags='tarako')]))
else:
queryset = queryset.filter(
Bool(must=[~F('term', tags='tarako')]))
return queryset
class WebsiteSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['keywords', 'category', 'device']
class ReviewerWebsiteSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['keywords', 'category', 'device', 'status', 'is_disabled']
class PublicAppsFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters only public items --
those with PUBLIC status and not disabled.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Bool(must=[F('term', status=mkt.STATUS_PUBLIC),
F('term', is_disabled=False)]))
class ValidAppsFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters only valid items --
those with any valid status and not disabled or deleted.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Bool(must=[F('terms', status=mkt.VALID_STATUSES),
F('term', is_disabled=False)]))
class DeviceTypeFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters based on the matching
device type provided.
"""
def filter_queryset(self, request, queryset, view):
device_id = get_device_id(request)
data = {
'gaia': getattr(request, 'GAIA', False),
'mobile': getattr(request, 'MOBILE', False),
'tablet': getattr(request, 'TABLET', False),
}
flash_incompatible = data['mobile'] or data['gaia']
if device_id:
queryset = queryset.filter(
Bool(must=[F('term', device=device_id)]))
if flash_incompatible:
queryset = queryset.filter(
Bool(must_not=[F('term', uses_flash=True)]))
return queryset
class RegionFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters based on the matching
region provided.
"""
def filter_queryset(self, request, queryset, view):
region = get_region_from_request(request)
if region:
return queryset.filter(
Bool(must_not=[F('term', region_exclusions=region.id)]))
return queryset
class ProfileFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters based on the feature
profile provided.
"""
def filter_queryset(self, request, queryset, view):
profile = get_feature_profile(request)
if profile:
must_not = []
for k in profile.to_kwargs(prefix='features.has_').keys():
must_not.append(F('term', **{k: True}))
if must_not:
return queryset.filter(Bool(must_not=must_not))
return queryset
class SortingFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that applies sorting based on the
form data provided.
"""
DEFAULT_SORTING = {
'popularity': '-popularity',
'rating': '-bayesian_rating',
'created': '-created',
'reviewed': '-reviewed',
'name': 'name_sort',
'trending': '-trending',
}
def _get_regional_sort(self, region, field):
"""
A helper method to return the sort field with region for mature
regions, otherwise returns the field.
"""
if region and not region.adolescent:
return ['-%s_%s' % (field, region.id)]
return ['-%s' % field]
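    # Example (follows directly from the code above): for a mature region with
    # id 7, _get_regional_sort(region, 'popularity') returns ['-popularity_7'];
    # for an adolescent region or region=None it returns ['-popularity'].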
def filter_queryset(self, request, queryset, view):
region = get_region_from_request(request)
search_query = request.GET.get('q')
sort = request.GET.getlist('sort')
# When querying (with `?q=`) we want to sort by relevance. If no query
# is provided and no `?sort` is provided, i.e. we are only applying
# filters which don't affect the relevance, we sort by popularity
# descending.
order_by = None
if not search_query:
order_by = self._get_regional_sort(region, 'popularity')
if sort:
if 'popularity' in sort:
order_by = self._get_regional_sort(region, 'popularity')
elif 'trending' in sort:
order_by = self._get_regional_sort(region, 'trending')
else:
order_by = [self.DEFAULT_SORTING[name] for name in sort
if name in self.DEFAULT_SORTING]
if order_by:
return queryset.sort(*order_by)
return queryset
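# Example (illustrative): a request with no `?q=` and `?sort=rating` ends up
# sorted by ['-bayesian_rating']; a request with `?q=foo` and no `?sort` is
# returned unsorted, i.e. in relevance order from the function_score query.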
class OpenMobileACLFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that finds apps using openmobile_acl
feature flag.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Bool(must=[F('term', **{'features.has_openmobileacl': True})]))
|
|
from __future__ import print_function,division
import logging
import subprocess as sp
import os, re
import time
try:
import numpy as np
import pandas as pd
from astropy.units import UnitsError
from astropy.coordinates import SkyCoord
except ImportError:
np, pd = (None, None)
UnitsError, SkyCoord = (None, None)
from .extinction import get_AV_infinity
NONMAG_COLS = ['Gc','logAge', '[M/H]', 'm_ini', 'logL', 'logTe', 'logg',
'm-M0', 'Av', 'm2/m1', 'mbol', 'Mact'] #all the rest are mags
def get_trilegal(filename,ra,dec,folder='.', galactic=False,
filterset='kepler_2mass',area=1,maglim=27,binaries=False,
trilegal_version='1.6',sigma_AV=0.1,convert_h5=True):
"""Runs get_trilegal perl script; optionally saves output into .h5 file
Depends on a perl script provided by L. Girardi; calls the
web form simulation, downloads the file, and (optionally) converts
to HDF format.
Uses A_V at infinity from :func:`utils.get_AV_infinity`.
.. note::
Would be desirable to re-write the get_trilegal script
all in python.
:param filename:
Desired output filename. If extension not provided, it will
be added.
    :param ra,dec:
        Coordinates (equatorial, in degrees) for the line-of-sight simulation,
        or galactic (l, b) if ``galactic=True``.
:param folder: (optional)
Folder to which to save file. *Acknowledged, file control
in this function is a bit wonky.*
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param area: (optional)
Area of TRILEGAL simulation [sq. deg]
    :param maglim: (optional)
        Limiting magnitude in the first band of ``filterset`` (by default the
        Kepler band). If you want to limit in a different band, you have to
        change the hard-coded band index passed to :func:`trilegal_webcall`.
:param binaries: (optional)
Whether to have TRILEGAL include binary stars. Default ``False``.
:param trilegal_version: (optional)
Default ``'1.6'``.
:param sigma_AV: (optional)
Fractional spread in A_V along the line of sight.
:param convert_h5: (optional)
If true, text file downloaded from TRILEGAL will be converted
into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
path.
"""
if galactic:
l, b = ra, dec
else:
try:
c = SkyCoord(ra,dec)
except UnitsError:
c = SkyCoord(ra,dec,unit='deg')
l,b = (c.galactic.l.value,c.galactic.b.value)
if os.path.isabs(filename):
folder = ''
    if not re.search(r'\.dat$', filename):
outfile = '{}/{}.dat'.format(folder,filename)
else:
outfile = '{}/{}'.format(folder,filename)
AV = get_AV_infinity(l,b,frame='galactic')
#cmd = 'get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b,
# area,binaries,AV,sigma_AV,
# filterset,maglim,outfile)
#sp.Popen(cmd,shell=True).wait()
trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,outfile)
if convert_h5:
        df = pd.read_table(outfile, sep=r'\s+', skipfooter=1, engine='python')
df = df.rename(columns={'#Gc':'Gc'})
for col in df.columns:
if col not in NONMAG_COLS:
df.rename(columns={col:'{}_mag'.format(col)},inplace=True)
        if not re.search(r'\.h5$', filename):
h5file = '{}/{}.h5'.format(folder,filename)
else:
h5file = '{}/{}'.format(folder,filename)
df.to_hdf(h5file,'df')
with pd.HDFStore(h5file) as store:
attrs = store.get_storer('df').attrs
attrs.trilegal_args = {'version':trilegal_version,
'ra':ra, 'dec':dec,
'l':l,'b':b,'area':area,
'AV':AV, 'sigma_AV':sigma_AV,
'filterset':filterset,
'maglim':maglim,
'binaries':binaries}
os.remove(outfile)
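# A minimal usage sketch (hypothetical coordinates and filename; needs network
# access to the TRILEGAL web form plus the optional astropy/pandas imports):
#
#     get_trilegal('my_field', 280.5, -15.2, folder='/tmp',
#                  filterset='kepler_2mass', area=1, maglim=24)
#
# With convert_h5=True this leaves /tmp/my_field.h5 holding a DataFrame under
# the 'df' key and removes the intermediate /tmp/my_field.dat file.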
def trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,
outfile):
"""Calls TRILEGAL webserver and downloads results file.
:param trilegal_version:
Version of trilegal (only tested on 1.6).
:param l,b:
Coordinates (galactic) for line-of-sight simulation.
:param area:
Area of TRILEGAL simulation [sq. deg]
:param binaries:
Whether to have TRILEGAL include binary stars. Default ``False``.
:param AV:
Extinction along the line of sight.
:param sigma_AV:
Fractional spread in A_V along the line of sight.
    :param filterset:
        Filter set for which to call TRILEGAL.
    :param maglim:
        Limiting magnitude, applied to the first band of ``filterset`` by
        default. If you want to limit in a different band, you have to change
        this function directly.
:param outfile:
Desired output filename.
"""
webserver = 'http://stev.oapd.inaf.it'
args = [l,b,area,AV,sigma_AV,filterset,maglim,1,binaries]
mainparams = ('imf_file=tab_imf%2Fimf_chabrier_lognormal.dat&binary_frac=0.3&'
'binary_mrinf=0.7&binary_mrsup=1&extinction_h_r=100000&extinction_h_z='
'110&extinction_kind=2&extinction_rho_sun=0.00015&extinction_infty={}&'
'extinction_sigma={}&r_sun=8700&z_sun=24.2&thindisk_h_r=2800&'
'thindisk_r_min=0&thindisk_r_max=15000&thindisk_kind=3&thindisk_h_z0='
'95&thindisk_hz_tau0=4400000000&thindisk_hz_alpha=1.6666&'
'thindisk_rho_sun=59&thindisk_file=tab_sfr%2Ffile_sfr_thindisk_mod.dat&'
'thindisk_a=0.8&thindisk_b=0&thickdisk_kind=0&thickdisk_h_r=2800&'
'thickdisk_r_min=0&thickdisk_r_max=15000&thickdisk_h_z=800&'
'thickdisk_rho_sun=0.0015&thickdisk_file=tab_sfr%2Ffile_sfr_thickdisk.dat&'
'thickdisk_a=1&thickdisk_b=0&halo_kind=2&halo_r_eff=2800&halo_q=0.65&'
'halo_rho_sun=0.00015&halo_file=tab_sfr%2Ffile_sfr_halo.dat&halo_a=1&'
'halo_b=0&bulge_kind=2&bulge_am=2500&bulge_a0=95&bulge_eta=0.68&'
'bulge_csi=0.31&bulge_phi0=15&bulge_rho_central=406.0&'
'bulge_cutoffmass=0.01&bulge_file=tab_sfr%2Ffile_sfr_bulge_zoccali_p03.dat&'
'bulge_a=1&bulge_b=-2.0e9&object_kind=0&object_mass=1280&object_dist=1658&'
'object_av=1.504&object_avkind=1&object_cutoffmass=0.8&'
'object_file=tab_sfr%2Ffile_sfr_m4.dat&object_a=1&object_b=0&'
'output_kind=1').format(AV,sigma_AV)
cmdargs = [trilegal_version,l,b,area,filterset,1,maglim,binaries,mainparams,
webserver,trilegal_version]
cmd = ("wget -o lixo -Otmpfile --post-data='submit_form=Submit&trilegal_version={}"
"&gal_coord=1&gc_l={}&gc_b={}&eq_alpha=0&eq_delta=0&field={}&photsys_file="
"tab_mag_odfnew%2Ftab_mag_{}.dat&icm_lim={}&mag_lim={}&mag_res=0.1&"
"binary_kind={}&{}' {}/cgi-bin/trilegal_{}").format(*cmdargs)
complete = False
while not complete:
notconnected = True
busy = True
print("TRILEGAL is being called with \n l={} deg, b={} deg, area={} sqrdeg\n "
"Av={} with {} fractional r.m.s. spread \n in the {} system, complete down to "
"mag={} in its {}th filter, use_binaries set to {}.".format(*args))
sp.Popen(cmd,shell=True).wait()
if os.path.exists('tmpfile') and os.path.getsize('tmpfile')>0:
notconnected = False
else:
print("No communication with {}, will retry in 2 min".format(webserver))
time.sleep(120)
if not notconnected:
with open('tmpfile','r') as f:
lines = f.readlines()
for line in lines:
if 'The results will be available after about 2 minutes' in line:
busy = False
break
sp.Popen('rm -f lixo tmpfile',shell=True)
if not busy:
filenameidx = line.find('<a href=../tmp/') +15
fileendidx = line[filenameidx:].find('.dat')
filename = line[filenameidx:filenameidx+fileendidx+4]
print("retrieving data from {} ...".format(filename))
while not complete:
time.sleep(40)
modcmd = 'wget -o lixo -O{} {}/tmp/{}'.format(filename,webserver,filename)
modcall = sp.Popen(modcmd,shell=True).wait()
if os.path.getsize(filename)>0:
with open(filename,'r') as f:
lastline = f.readlines()[-1]
if 'normally' in lastline:
complete = True
print('model downloaded!..')
if not complete:
print('still running...')
else:
print('Server busy, trying again in 2 minutes')
time.sleep(120)
sp.Popen('mv {} {}'.format(filename,outfile),shell=True).wait()
print('results copied to {}'.format(outfile))
|
|
import enum
class Severity(enum.IntEnum):
info = 0 # Problem with the scanner
notice = 1 # Speculative weakness
warning = 2 # Theoretical weakness
error = 4 # Expensive attack
critical = 8 # Cheap attack
class Issue(object):
def __init__(self, severity, what, details=None, instructions=None):
self.severity = severity
self.what = what
self.details = details
self.instructions = instructions
def __str__(self):
return "{0}! {1}".format(self.severity.name.upper(), self.what)
def to_dict(self):
return self.__dict__
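# Example (illustrative):
#   >>> str(Issue(Severity.warning, "Key exchange: weak hash"))
#   'WARNING! Key exchange: weak hash'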
class CipherMode(enum.IntEnum):
CBC = 0
STREAM = 1 # or CTR
AEAD = 2
class Cipher(object):
def __init__(self, mode, *args):
self.mode = mode
self.issues = args
class MACMode(enum.IntEnum):
EAM = 0
ETM = 1
class MAC(object):
def __init__(self, mode, *args):
self.mode = mode
self.issues = args
def issue_unknown(algo_type, algo_name):
return Issue(
Severity.info,
"Unknown {0} algorithm: {1}".format(algo_type, algo_name),
"""The SSHLabs Scanner does not know anything about {0} algorithm {1}. It could
be perfectly safe. Or not.""",
"""No action required. If you know what this algorithm is, consider sending a
pull request to https://github.com/stribika/sshlabs."""
)
def issue_kex_dh_small_group(severity, algo, size):
return Issue(
severity,
"Key exchange: small DH group",
"""The security of the Diffie-Hellman key exchange relies on the difficulty of
the discrete logarithm problem. The server is configured to use {0}, which uses
a prime modulus too small (only {1} bits) to provide forward secrecy.""".format(algo, size),
"""Remove {0} from the KexAlgorithms line in /etc/ssh/sshd_config, then send
SIGHUP to sshd.""".format(algo)
)
def issue_kex_weak_hash(severity, algo):
return Issue(
severity,
"Key exchange: weak hash",
"""The downgrade resistance of the SSH protocol relies on using a collision
resistant hash function for deriving the symmetric keys from the shared secret
established during the key exchange. The server is configured to use {0}, which
uses a weak hash function, that does not provide downgrade resistance.""".format(algo),
"""Remove {0} from the KexAlgorithms line in /etc/ssh/sshd_config, then send
SIGHUP to sshd.""".format(algo)
)
def issue_kex_dh_gex_small_group(severity, group, size):
return Issue(
severity,
"Key exchange: small DH group",
"""The security of the Diffie-Hellman key exchange relies on the difficulty of
the discrete logarithm problem. The server is configured to use Diffie-Hellman
group exchange, and uses a prime modulus too small (only {0} bits) to provide
forward secrecy.""".format(size),
"""Remove the line with prime modulus {0:x} and generator {1:x} from
/etc/ssh/moduli with the following commands:
awk '$5 > 2000' /etc/ssh/moduli > "${{HOME}}/moduli"
wc -l "${{HOME}}/moduli" # make sure there is something left
mv "${{HOME}}/moduli" /etc/ssh/moduli
It is possible that the moduli file does not exist, or contains no safe groups.
In this case, regenerate it with the following commands:
ssh-keygen -G /etc/ssh/moduli.all -b 4096
ssh-keygen -T /etc/ssh/moduli.safe -f /etc/ssh/moduli.all
mv /etc/ssh/moduli.safe /etc/ssh/moduli
rm /etc/ssh/moduli.all""".format(group.prime, group.generator)
)
def issue_kex_dh_gex_unsafe_group(severity, group):
return Issue(
severity,
"Key exchange: unsafe DH group",
"""The security of the Diffie-Hellman key exchange relies on the difficulty of
the discrete logarithm problem. If the modulus is not a safe prime, it may be
possible to solve DLP in polynomial time.""",
"""Remove the line with prime modulus {0:x} and generator {1:x} from
/etc/ssh/moduli. It is possible that the moduli file does not exist, or contains
no safe groups. In this case, regenerate it with the following commands:
ssh-keygen -G /etc/ssh/moduli.all -b 4096
ssh-keygen -T /etc/ssh/moduli.safe -f /etc/ssh/moduli.all
mv /etc/ssh/moduli.safe /etc/ssh/moduli
rm /etc/ssh/moduli.all""".format(group.prime, group.generator)
)
def issue_kex_ecdh_unsafe_curve(severity, algo):
return Issue(
severity,
"Key exchange: unsafe elliptic curve",
"""The elliptic curve used by {0} does not meet the SafeCurves criteria. This
means they are unnecessarily difficult to implement safely.""".format(algo),
"""Remove {0} from the KexAlgorithms line in /etc/ssh/sshd_config, then send
SIGHUP to sshd.""".format(algo)
)
def issue_sign_dsa(severity, algo):
return Issue(
severity,
"Signature: requires per-signature entropy",
"""The {0} host key algorithm requires entropy for each signature, and leaks the
secret key if the random values are predictable or reused even once.""".format(algo),
"""Delete the {0} host key files from /etc/ssh, and the HostKey line from
/etc/ssh/sshd_config referring to these files. If there are no HostKey lines at
all, add the key files you wish to use.""".format(algo)
)
def issue_sign_ecdsa_unsafe_curve(severity, algo):
return Issue(
severity,
"Signature: unsafe elliptic curve",
"""The elliptic curve used by {0} does not meet the SafeCurves criteria. This
means they are unnecessarily difficult to implement safely.""".format(algo),
"""Delete the {0} host key files from /etc/ssh, and the HostKey line from
/etc/ssh/sshd_config referring to these files. If there are no HostKey lines at
all, add the key files you wish to use.""".format(algo)
)
def issue_sign_small_key(severity, algo, size):
return Issue(
severity,
"Signature: small key size",
"""The host key used by {0} is only {1} bits, small enough to be
bruteforced.""".format(algo, size),
"""Delete the {0} host key files from /etc/ssh, then if larger keys are
supported, create them with ssh-keygen. Otherwise remove the HostKey line from
/etc/ssh/sshd_config referring to these files. If there are no HostKey lines at
all, add the key files you wish to use.""".format(algo)
)
def issue_cipher_small_block(severity, algo, size):
return Issue(
severity,
"Cipher: small block size",
"""The block size of the {0} cipher is only {1} bits. Repeated ciphertext blocks
leak information about the plaintext.""".format(algo, size),
"""Remove {0} from the Ciphers line in /etc/ssh/sshd_config, then send SIGHUP to
sshd.""".format(algo)
)
def issue_cipher_weak(severity, algo):
return Issue(
severity,
"Cipher: weak algorithm",
"""The {0} cipher algorithm is known to be broken.""".format(algo),
"""Remove {0} from the Ciphers line in /etc/ssh/sshd_config, then send SIGHUP to
sshd.""".format(algo)
)
def issue_authencr_cbc_and_mac(severity, cipher, mac):
return Issue(
severity,
"Authenticated encryption: CBC-and-MAC",
"""The correct way to build authenticated encryption from a cipher and a MAC
is to encrypt, then append the MAC of the ciphertext. The server is configured
to encrypt with {0}, and append the {1} of the plaintext. Using a cipher in CBC
mode might lead to padding oracle attacks if implemented incorrectly.""".format(cipher, mac),
"""Remove {0} from the Ciphers line, or remove {1} from the MACs line in
/etc/sshd_config, then send SIGHUP to sshd.""".format(cipher, mac),
)
KEX_DH_GEX_SHA1 = "diffie-hellman-group-exchange-sha1"
KEX_DH_GEX_SHA256 = "diffie-hellman-group-exchange-sha256"
KEX_DH_GROUP1_SHA1 = "diffie-hellman-group1-sha1"
KEX_DH_GROUP14_SHA1 = "diffie-hellman-group14-sha1"
KEX_DH_GROUP14_SHA256 = "diffie-hellman-group14-sha256"
KEX_DH_GROUP15_SHA256 = "diffie-hellman-group15-sha256"
KEX_DH_GROUP16_SHA256 = "diffie-hellman-group16-sha256"
KEX_ECDH_CURVE25519_SHA256 = "curve25519-sha256@libssh.org"
KEX_ECDH_NISTP256_SHA256 = "ecdh-sha2-nistp256"
KEX_ECDH_NISTP384_SHA384 = "ecdh-sha2-nistp384"
KEX_ECDH_NISTP521_SHA512 = "ecdh-sha2-nistp521"
known_kex_algorithms = {
KEX_ECDH_CURVE25519_SHA256: [],
KEX_DH_GROUP1_SHA1: [
issue_kex_dh_small_group(Severity.error, KEX_DH_GROUP1_SHA1, 1024),
issue_kex_weak_hash(Severity.warning, KEX_DH_GROUP1_SHA1),
],
KEX_DH_GROUP14_SHA1: [
issue_kex_weak_hash(Severity.warning, KEX_DH_GROUP14_SHA1),
],
KEX_DH_GROUP14_SHA256: [],
KEX_DH_GROUP15_SHA256: [],
KEX_DH_GROUP16_SHA256: [],
KEX_DH_GEX_SHA1: [
issue_kex_weak_hash(Severity.warning, KEX_DH_GEX_SHA1),
],
KEX_DH_GEX_SHA256: [],
KEX_ECDH_NISTP256_SHA256: [
issue_kex_ecdh_unsafe_curve(Severity.notice, KEX_ECDH_NISTP256_SHA256)
],
KEX_ECDH_NISTP384_SHA384: [
issue_kex_ecdh_unsafe_curve(Severity.notice, KEX_ECDH_NISTP384_SHA384)
],
KEX_ECDH_NISTP521_SHA512: [],
}
known_ciphers = {
"3des-cbc": Cipher(CipherMode.CBC,
issue_cipher_small_block(Severity.warning, "3des-cbc", 64)),
"aes128-cbc": Cipher(CipherMode.CBC),
"aes192-cbc": Cipher(CipherMode.CBC),
"aes256-cbc": Cipher(CipherMode.CBC),
"aes128-ctr": Cipher(CipherMode.STREAM),
"aes192-ctr": Cipher(CipherMode.STREAM),
"aes256-ctr": Cipher(CipherMode.STREAM),
"aes128-gcm@openssh.com": Cipher(CipherMode.AEAD),
"aes256-gcm@openssh.com": Cipher(CipherMode.AEAD),
"arcfour": Cipher(CipherMode.STREAM,
issue_cipher_weak(Severity.error, "arcfour")),
"arcfour128": Cipher(CipherMode.STREAM,
issue_cipher_weak(Severity.error, "arcfour128")),
"arcfour256": Cipher(CipherMode.STREAM,
issue_cipher_weak(Severity.error, "arcfour256")),
"blowfish-cbc": Cipher(CipherMode.CBC,
issue_cipher_small_block(Severity.warning, "blowfish-cbc", 64)),
"cast128-cbc": Cipher(CipherMode.CBC,
issue_cipher_small_block(Severity.warning, "cast128-cbc", 64)),
"chacha20-poly1305@openssh.com": Cipher(CipherMode.AEAD),
}
known_macs = {
"hmac-md5": MAC(MACMode.EAM,
Issue(Severity.notice, "weak HMAC hash", "hmac-md5")),
"hmac-md5-96": MAC(MACMode.EAM,
Issue(Severity.notice, "weak HMAC hash", "hmac-md5-96"),
Issue(Severity.notice, "small MAC tag", "96 bits", "hmac-md5-96")),
"hmac-ripemd160": MAC(MACMode.EAM),
"hmac-sha1": MAC(MACMode.EAM,
Issue(Severity.notice, "weak HMAC hash", "hmac-sha1")),
"hmac-sha1-96": MAC(MACMode.EAM,
Issue(Severity.notice, "weak HMAC hash", "hmac-sha1-96"),
Issue(Severity.notice, "small MAC tag", "96 bits", "hmac-sha1-96")),
"hmac-sha2-256": MAC(MACMode.EAM),
"hmac-sha2-512": MAC(MACMode.EAM),
"umac-64@openssh.com": MAC(MACMode.EAM,
Issue(Severity.notice, "small MAC tag", "64 bits", "umac-64@openssh.com")),
"umac-128@openssh.com": MAC(MACMode.EAM),
"hmac-md5-etm@openssh.com": MAC(MACMode.ETM,
Issue(Severity.notice, "weak HMAC hash", "hmac-md5-etm@openssh.com")),
"hmac-md5-96-etm@openssh.com": MAC(MACMode.ETM,
Issue(Severity.notice, "weak HMAC hash", "hmac-md5-96-etm@openssh.com"),
Issue(Severity.notice, "small MAC tag", "96 bits", "hmac-md5-96-etm@openssh.com")),
"hmac-ripemd160-etm@openssh.com": MAC(MACMode.ETM),
"hmac-sha1-etm@openssh.com": MAC(MACMode.ETM,
Issue(Severity.notice, "weak HMAC hash", "hmac-sha1-etm@openssh.com")),
"hmac-sha1-96-etm@openssh.com": MAC(MACMode.ETM,
Issue(Severity.notice, "weak HMAC hash", "hmac-sha1-96-etm@openssh.com"),
Issue(Severity.notice, "small MAC tag", "96 bits", "hmac-sha1-96-etm@openssh.com")),
"hmac-sha2-256-etm@openssh.com": MAC(MACMode.ETM),
"hmac-sha2-512-etm@openssh.com": MAC(MACMode.ETM),
"umac-64-etm@openssh.com": MAC(MACMode.ETM,
Issue(Severity.notice, "small MAC tag", "64 bits", "umac-64-etm@openssh.com")),
"umac-128-etm@openssh.com": MAC(MACMode.ETM),
}
SIGN_DSA = "ssh-dss"
SIGN_ECDSA_NISTP256_SHA256 = "ecdsa-sha2-nistp256"
SIGN_ECDSA_NISTP384_SHA384 = "ecdsa-sha2-nistp384"
SIGN_ECDSA_NISTP521_SHA512 = "ecdsa-sha2-nistp521"
SIGN_RSA_SHA1 = "ssh-rsa"
SIGN_RSA_SHA256 = "rsa-sha2-256"
SIGN_RSA_SHA512 = "rsa-sha2-512"
known_host_key_algorithms = {
SIGN_DSA: [
issue_sign_dsa(Severity.notice, SIGN_DSA),
issue_sign_small_key(Severity.error, SIGN_DSA, 1024)
],
SIGN_ECDSA_NISTP256_SHA256: [
issue_sign_dsa(Severity.notice, SIGN_ECDSA_NISTP256_SHA256),
issue_sign_ecdsa_unsafe_curve(Severity.notice, SIGN_ECDSA_NISTP256_SHA256),
],
SIGN_ECDSA_NISTP384_SHA384: [
issue_sign_dsa(Severity.notice, SIGN_ECDSA_NISTP384_SHA384),
        issue_sign_ecdsa_unsafe_curve(Severity.notice, SIGN_ECDSA_NISTP384_SHA384),
],
SIGN_ECDSA_NISTP521_SHA512: [
issue_sign_dsa(Severity.notice, SIGN_ECDSA_NISTP521_SHA512),
],
SIGN_RSA_SHA1: [],
SIGN_RSA_SHA256: [],
SIGN_RSA_SHA512: [],
}
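# A minimal lookup sketch (hypothetical helper, not part of the scanner API):
#
#     def issues_for_kex(offered_algorithms):
#         found = []
#         for algo in offered_algorithms:
#             if algo not in known_kex_algorithms:
#                 found.append(issue_unknown("key exchange", algo))
#             else:
#                 found.extend(known_kex_algorithms[algo])
#         return found
#
# The same pattern applies to known_ciphers, known_macs and
# known_host_key_algorithms (whose values carry their issues either on the
# Cipher/MAC objects or directly as lists).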
|
|
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import units
from manila.common import constants
from manila import context
import manila.exception as exception
from manila.share import configuration
from manila.share.drivers.cephfs import cephfs_native
from manila.share import share_types
from manila import test
from manila.tests import fake_share
class MockVolumeClientModule(object):
"""Mocked up version of ceph's VolumeClient interface."""
class VolumePath(object):
"""Copy of VolumePath from CephFSVolumeClient."""
def __init__(self, group_id, volume_id):
self.group_id = group_id
self.volume_id = volume_id
def __eq__(self, other):
return (self.group_id == other.group_id
and self.volume_id == other.volume_id)
def __str__(self):
return "{0}/{1}".format(self.group_id, self.volume_id)
class CephFSVolumeClient(mock.Mock):
mock_used_bytes = 0
version = 1
def __init__(self, *args, **kwargs):
mock.Mock.__init__(self, spec=[
"connect", "disconnect",
"create_snapshot_volume", "destroy_snapshot_volume",
"create_group", "destroy_group",
"delete_volume", "purge_volume",
"deauthorize", "evict", "set_max_bytes",
"destroy_snapshot_group", "create_snapshot_group",
"get_authorized_ids"
])
self.create_volume = mock.Mock(return_value={
"mount_path": "/foo/bar"
})
self.get_mon_addrs = mock.Mock(return_value=["1.2.3.4", "5.6.7.8"])
self.get_authorized_ids = mock.Mock(
return_value=[('eve', 'rw')])
self.authorize = mock.Mock(return_value={"auth_key": "abc123"})
self.get_used_bytes = mock.Mock(return_value=self.mock_used_bytes)
self.rados = mock.Mock()
self.rados.get_cluster_stats = mock.Mock(return_value={
"kb": 1000,
"kb_avail": 500
})
@ddt.ddt
class CephFSNativeDriverTestCase(test.TestCase):
"""Test the CephFS native driver.
This is a very simple driver that mainly
calls through to the CephFSVolumeClient interface, so the tests validate
that the Manila driver calls map to the appropriate CephFSVolumeClient
calls.
"""
def setUp(self):
super(CephFSNativeDriverTestCase, self).setUp()
self.fake_conf = configuration.Configuration(None)
self._context = context.get_admin_context()
self._share = fake_share.fake_share(share_proto='CEPHFS')
self.fake_conf.set_default('driver_handles_share_servers', False)
self.fake_conf.set_default('cephfs_auth_id', 'manila')
self.mock_object(cephfs_native, "ceph_volume_client",
MockVolumeClientModule)
self.mock_object(cephfs_native, "ceph_module_found", True)
self._driver = (
cephfs_native.CephFSNativeDriver(configuration=self.fake_conf))
self.mock_object(share_types, 'get_share_type_extra_specs',
mock.Mock(return_value={}))
def test_create_share(self):
expected_export_locations = {
'path': '1.2.3.4,5.6.7.8:/foo/bar',
'is_admin_only': False,
'metadata': {},
}
export_locations = self._driver.create_share(self._context,
self._share)
self.assertEqual(expected_export_locations, export_locations)
self._driver._volume_client.create_volume.assert_called_once_with(
self._driver._share_path(self._share),
size=self._share['size'] * units.Gi,
data_isolated=False)
def test_ensure_share(self):
self._driver.ensure_share(self._context,
self._share)
self._driver._volume_client.create_volume.assert_called_once_with(
self._driver._share_path(self._share),
size=self._share['size'] * units.Gi,
data_isolated=False)
def test_create_data_isolated(self):
self.mock_object(share_types, 'get_share_type_extra_specs',
mock.Mock(return_value={"cephfs:data_isolated": True})
)
self._driver.create_share(self._context, self._share)
self._driver._volume_client.create_volume.assert_called_once_with(
self._driver._share_path(self._share),
size=self._share['size'] * units.Gi,
data_isolated=True)
def test_delete_share(self):
self._driver.delete_share(self._context, self._share)
self._driver._volume_client.delete_volume.assert_called_once_with(
self._driver._share_path(self._share),
data_isolated=False)
self._driver._volume_client.purge_volume.assert_called_once_with(
self._driver._share_path(self._share),
data_isolated=False)
def test_delete_data_isolated(self):
self.mock_object(share_types, 'get_share_type_extra_specs',
mock.Mock(return_value={"cephfs:data_isolated": True})
)
self._driver.delete_share(self._context, self._share)
self._driver._volume_client.delete_volume.assert_called_once_with(
self._driver._share_path(self._share),
data_isolated=True)
self._driver._volume_client.purge_volume.assert_called_once_with(
self._driver._share_path(self._share),
data_isolated=True)
@ddt.data(None, 1)
def test_allow_access_rw(self, volume_client_version):
rule = {
'access_level': constants.ACCESS_LEVEL_RW,
'access_to': 'alice',
'access_type': 'cephx',
}
self._driver.volume_client.version = volume_client_version
auth_key = self._driver._allow_access(
self._context, self._share, rule)
self.assertEqual("abc123", auth_key)
if not volume_client_version:
self._driver._volume_client.authorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice")
else:
self._driver._volume_client.authorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice",
readonly=False,
tenant_id=self._share['project_id'])
@ddt.data(None, 1)
def test_allow_access_ro(self, volume_client_version):
rule = {
'access_level': constants.ACCESS_LEVEL_RO,
'access_to': 'alice',
'access_type': 'cephx',
}
self._driver.volume_client.version = volume_client_version
if not volume_client_version:
self.assertRaises(exception.InvalidShareAccessLevel,
self._driver._allow_access,
self._context, self._share, rule)
else:
auth_key = self._driver._allow_access(self._context, self._share,
rule)
self.assertEqual("abc123", auth_key)
self._driver._volume_client.authorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice",
readonly=True,
tenant_id=self._share['project_id'],
)
def test_allow_access_wrong_type(self):
self.assertRaises(exception.InvalidShareAccess,
self._driver._allow_access,
self._context, self._share, {
'access_level': constants.ACCESS_LEVEL_RW,
'access_type': 'RHUBARB',
'access_to': 'alice'
})
def test_allow_access_same_cephx_id_as_manila_service(self):
self.assertRaises(exception.InvalidInput,
self._driver._allow_access,
self._context, self._share, {
'access_level': constants.ACCESS_LEVEL_RW,
'access_type': 'cephx',
'access_to': 'manila',
})
def test_deny_access(self):
self._driver._deny_access(self._context, self._share, {
'access_level': 'rw',
'access_type': 'cephx',
'access_to': 'alice'
})
self._driver._volume_client.deauthorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice")
self._driver._volume_client.evict.assert_called_once_with(
"alice",
volume_path=self._driver._share_path(self._share))
def test_update_access_add_rm(self):
alice = {
'id': 'instance_mapping_id1',
'access_id': 'accessid1',
'access_level': 'rw',
'access_type': 'cephx',
'access_to': 'alice'
}
bob = {
'id': 'instance_mapping_id2',
'access_id': 'accessid2',
'access_level': 'rw',
'access_type': 'cephx',
'access_to': 'bob'
}
access_updates = self._driver.update_access(
self._context, self._share, access_rules=[alice],
add_rules=[alice], delete_rules=[bob])
self.assertEqual(
{'accessid1': {'access_key': 'abc123'}}, access_updates)
self._driver._volume_client.authorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice",
readonly=False,
tenant_id=self._share['project_id'])
self._driver._volume_client.deauthorize.assert_called_once_with(
self._driver._share_path(self._share),
"bob")
@ddt.data(None, 1)
def test_update_access_all(self, volume_client_version):
alice = {
'id': 'instance_mapping_id1',
'access_id': 'accessid1',
'access_level': 'rw',
'access_type': 'cephx',
'access_to': 'alice'
}
self._driver.volume_client.version = volume_client_version
access_updates = self._driver.update_access(self._context, self._share,
access_rules=[alice],
add_rules=[],
delete_rules=[])
self.assertEqual(
{'accessid1': {'access_key': 'abc123'}}, access_updates)
if volume_client_version:
(self._driver._volume_client.get_authorized_ids.
assert_called_once_with(self._driver._share_path(self._share)))
self._driver._volume_client.authorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice",
readonly=False,
tenant_id=self._share['project_id']
)
self._driver._volume_client.deauthorize.assert_called_once_with(
self._driver._share_path(self._share),
"eve",
)
else:
self.assertFalse(
self._driver._volume_client.get_authorized_ids.called)
self._driver._volume_client.authorize.assert_called_once_with(
self._driver._share_path(self._share),
"alice",
)
def test_extend_share(self):
new_size_gb = self._share['size'] * 2
new_size = new_size_gb * units.Gi
self._driver.extend_share(self._share, new_size_gb, None)
self._driver._volume_client.set_max_bytes.assert_called_once_with(
self._driver._share_path(self._share),
new_size)
def test_shrink_share(self):
new_size_gb = self._share['size'] * 0.5
new_size = new_size_gb * units.Gi
self._driver.shrink_share(self._share, new_size_gb, None)
self._driver._volume_client.get_used_bytes.assert_called_once_with(
self._driver._share_path(self._share))
self._driver._volume_client.set_max_bytes.assert_called_once_with(
self._driver._share_path(self._share),
new_size)
def test_shrink_share_full(self):
"""That shrink fails when share is too full."""
new_size_gb = self._share['size'] * 0.5
# Pretend to be full up
vc = MockVolumeClientModule.CephFSVolumeClient
vc.mock_used_bytes = (units.Gi * self._share['size'])
self.assertRaises(exception.ShareShrinkingPossibleDataLoss,
self._driver.shrink_share,
self._share, new_size_gb, None)
self._driver._volume_client.set_max_bytes.assert_not_called()
def test_create_snapshot(self):
self._driver.create_snapshot(self._context,
{
"id": "instance1",
"share": self._share,
"snapshot_id": "snappy1"
},
None)
(self._driver._volume_client.create_snapshot_volume
.assert_called_once_with(
self._driver._share_path(self._share),
"snappy1_instance1"))
def test_delete_snapshot(self):
self._driver.delete_snapshot(self._context,
{
"id": "instance1",
"share": self._share,
"snapshot_id": "snappy1"
},
None)
(self._driver._volume_client.destroy_snapshot_volume
.assert_called_once_with(
self._driver._share_path(self._share),
"snappy1_instance1"))
def test_create_share_group(self):
self._driver.create_share_group(self._context, {"id": "grp1"}, None)
self._driver._volume_client.create_group.assert_called_once_with(
"grp1")
def test_delete_share_group(self):
self._driver.delete_share_group(self._context, {"id": "grp1"}, None)
self._driver._volume_client.destroy_group.assert_called_once_with(
"grp1")
    def test_create_share_group_snapshot(self):
self._driver.create_share_group_snapshot(self._context, {
'share_group_id': 'sgid',
'id': 'snapid'
})
(self._driver._volume_client.create_snapshot_group.
assert_called_once_with("sgid", "snapid"))
def test_delete_share_group_snapshot(self):
self._driver.delete_share_group_snapshot(self._context, {
'share_group_id': 'sgid',
'id': 'snapid'
})
(self._driver._volume_client.destroy_snapshot_group.
assert_called_once_with("sgid", "snapid"))
def test_delete_driver(self):
# Create share to prompt volume_client construction
self._driver.create_share(self._context,
self._share)
vc = self._driver._volume_client
del self._driver
vc.disconnect.assert_called_once_with()
def test_delete_driver_no_client(self):
self.assertIsNone(self._driver._volume_client)
del self._driver
def test_connect_noevict(self):
# When acting as "admin", driver should skip evicting
self._driver.configuration.local_conf.set_override('cephfs_auth_id',
"admin",
enforce_type=True)
self._driver.create_share(self._context,
self._share)
vc = self._driver._volume_client
vc.connect.assert_called_once_with(premount_evict=None)
def test_update_share_stats(self):
self._driver._volume_client
self._driver._update_share_stats()
result = self._driver._stats
self.assertEqual("CEPHFS", result['storage_protocol'])
def test_module_missing(self):
cephfs_native.ceph_module_found = False
cephfs_native.ceph_volume_client = None
self.assertRaises(exception.ManilaException,
self._driver.create_share,
self._context,
self._share)
def test_check_for_setup_error(self):
self._driver.check_for_setup_error()
self._driver._volume_client.connect.assert_called_once_with(
premount_evict='manila')
def test_check_for_setup_error_with_connection_error(self):
cephfs_native.ceph_module_found = False
cephfs_native.ceph_volume_client = None
self.assertRaises(exception.ManilaException,
self._driver.check_for_setup_error)
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import base64
import warnings
from contextlib import contextmanager
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from .mobile import Mobile
from .file_detector import FileDetector, LocalFileDetector
from selenium.common.exceptions import (InvalidArgumentException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
     - capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
_web_element_cls = WebElement
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None,
keep_alive=False, file_detector=None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Required parameter.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to False.
- file_detector - Pass custom file detector object during instantiation. If None,
then default LocalFileDetector() will be used.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
warnings.warn("Please use FirefoxOptions to set proxy",
DeprecationWarning)
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or isinstance(self.command_executor, str):
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
if browser_profile is not None:
warnings.warn("Please use FirefoxOptions to set browser profile",
DeprecationWarning)
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = file_detector or LocalFileDetector()
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self.session_id)
@contextmanager
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
@property
def mobile(self):
return self._mobile
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
         - capabilities - A dictionary of capabilities to request when starting the session.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if not isinstance(capabilities, dict):
raise InvalidArgumentException("Capabilities must be a dictionary")
w3c_caps = {"firstMatch": [], "alwaysMatch": {}}
if browser_profile:
if "moz:firefoxOptions" in capabilities:
capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded
else:
capabilities.update({'firefox_profile': browser_profile.encoded})
w3c_caps["alwaysMatch"].update(capabilities)
parameters = {"capabilities": w3c_caps,
"desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
response = response['value']
self.session_id = response['sessionId']
self.capabilities = response.get('value')
# if capabilities is none we are probably speaking to
# a W3C endpoint
if self.capabilities is None:
self.capabilities = response.get('capabilities')
# Double check to see if we have a W3C Compliant browser
self.w3c = response.get('status') is None
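    # Illustrative note (not from the original source): for
    # desired_capabilities={'browserName': 'firefox'}, start_session() above
    # posts a hybrid payload that both W3C and legacy JSON Wire servers accept:
    #
    #     {"capabilities": {"alwaysMatch": {"browserName": "firefox"},
    #                       "firstMatch": []},
    #      "desiredCapabilities": {"browserName": "firefox"}}
    #
    # A W3C-compliant server replies without a 'status' key, which is how
    # self.w3c gets set above.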
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, self._web_element_cls):
return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id, w3c=self.w3c)
def _unwrap_value(self, value):
if isinstance(value, dict) and ('ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value):
wrapped_id = value.get('ELEMENT', None)
if wrapped_id:
return self.create_web_element(value['ELEMENT'])
else:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id is not None:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
         - link_text: The text of the element to partially match on.
:Usage:
            driver.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
         - name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
        Synchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
converted_args = list(args)
command = None
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT
else:
command = Command.EXECUTE_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
def execute_async_script(self, script, *args):
"""
        Asynchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
converted_args = list(args)
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT_ASYNC
else:
command = Command.EXECUTE_ASYNC_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
if self.w3c:
return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
else:
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value']
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
command = Command.MAXIMIZE_WINDOW
if self.w3c:
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, {"windowHandle": "current"})
@property
def switch_to(self):
return self._switch_to
# Target Locators
def switch_to_active_element(self):
""" Deprecated use driver.switch_to.active_element
"""
warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
return self._switch_to.active_element
def switch_to_window(self, window_name):
""" Deprecated use driver.switch_to.window
"""
warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
self._switch_to.window(window_name)
def switch_to_frame(self, frame_reference):
""" Deprecated use driver.switch_to.frame
"""
warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
self._switch_to.frame(frame_reference)
def switch_to_default_content(self):
""" Deprecated use driver.switch_to.default_content
"""
warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
self._switch_to.default_content()
def switch_to_alert(self):
""" Deprecated use driver.switch_to.alert
"""
warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
return self._switch_to.alert
# Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
        Returns a list of dictionaries, each corresponding to a cookie visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
        :Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
driver.set_script_timeout(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'script': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.SET_SCRIPT_TIMEOUT, {
'ms': float(time_to_wait) * 1000})
def set_page_load_timeout(self, time_to_wait):
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_page_load_timeout(30)
"""
try:
self.execute(Command.SET_TIMEOUTS, {
'pageLoad': int(float(time_to_wait) * 1000)})
except WebDriverException:
self.execute(Command.SET_TIMEOUTS, {
'ms': float(time_to_wait) * 1000,
'type': 'page load'})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
:rtype: WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT, {
'using': by,
'value': value})['value']
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
:rtype: list of WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value']
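    # Illustrative note (not from the original source): on W3C sessions the
    # non-CSS locators above are rewritten to CSS selectors before being sent,
    # e.g.
    #
    #     driver.find_element(By.ID, 'login')
    #     # -> FIND_ELEMENT {'using': 'css selector', 'value': '[id="login"]'}
    #     driver.find_elements(By.CLASS_NAME, 'item')
    #     # -> FIND_ELEMENTS {'using': 'css selector', 'value': '.item'}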
@property
def desired_capabilities(self):
"""
        Returns the driver's current desired capabilities being used.
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
def save_screenshot(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.save_screenshot('/Screenshots/foo.png')
"""
return self.get_screenshot_as_file(filename)
def get_screenshot_as_png(self):
"""
        Gets the screenshot of the current window as binary data.
:Usage:
driver.get_screenshot_as_png()
"""
return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self):
"""
        Gets the screenshot of the current window as a base64 encoded string,
        which is useful for embedding images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
command = Command.SET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_SET_WINDOW_SIZE
self.execute(command, {
'width': int(width),
'height': int(height),
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
command = Command.GET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_GET_WINDOW_SIZE
size = self.execute(command, {'windowHandle': windowHandle})
if size.get('value', None) is not None:
return size['value']
else:
return size
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
if self.w3c:
return self.execute(Command.W3C_SET_WINDOW_POSITION, {
'x': int(x),
'y': int(y)
})
else:
self.execute(Command.SET_WINDOW_POSITION,
{
'x': int(x),
'y': int(y),
'windowHandle': windowHandle
})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_POSITION)['value']
else:
return self.execute(Command.GET_WINDOW_POSITION, {
'windowHandle': windowHandle})['value']
def get_window_rect(self):
"""
Gets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.get_window_rect()
"""
return self.execute(Command.GET_WINDOW_RECT)['value']
def set_window_rect(self, x=None, y=None, width=None, height=None):
"""
Sets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.set_window_rect(x=10, y=10)
driver.set_window_rect(width=100, height=200)
driver.set_window_rect(x=10, y=10, width=100, height=200)
"""
if (x is None and y is None) and (height is None and width is None):
raise InvalidArgumentException("x and y or height and width need values")
return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y,
"width": width,
"height": height})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
        By default, this is set to a LocalFileDetector() instance.
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
if detector is None:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types
:Usage:
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
         - log_type: type of log that will be returned
:Usage:
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
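# Minimal usage sketch, not part of the upstream module: it assumes a Selenium
# server is reachable at the default URL below and that the requested
# 'browserName' capability is honoured by that server.
if __name__ == '__main__':
    _driver = WebDriver(
        command_executor='http://127.0.0.1:4444/wd/hub',
        desired_capabilities={'browserName': 'firefox'})
    try:
        _driver.get('https://example.com')
        print(_driver.title)
    finally:
        _driver.quit()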
|
|
#!/usr/bin/env python
import gflags
import json
import logging
import math
import numpy
import os
import pprint
import sys
from scipy.misc import pilutil
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('start_year', 2013, 'Start processing from this year')
gflags.DEFINE_integer('start_month', 1, 'Start processing from this month')
gflags.RegisterValidator('start_month',
lambda value: value >= 1 and value <= 12,
message='Invalid start month')
gflags.DEFINE_integer('end_year', 2013, 'End processing with this year')
gflags.DEFINE_integer('end_month', 2, 'End processing with this month')
gflags.RegisterValidator('end_month',
lambda value: value >= 1 and value <= 12,
message='Invalid end month')
gflags.DEFINE_integer('width', 1920, 'Width of output')
gflags.DEFINE_integer('height', 1440, 'Height of output')
gflags.DEFINE_string('output', '', 'Output prefix', short_name = 'o')
gflags.DEFINE_string('test_type', 'ping', 'Test type to visualize.',
short_name = 't')
gflags.DEFINE_string('color_field', 'mean_rtt_ms',
'Field to use to color the pixels', short_name = 'c')
# TODO: validate color_field based on test_type
gflags.RegisterValidator(
'color_field',
lambda value: value == 'mean_rtt_ms' or value == 'max_rtt_ms' or value == 'packet_loss',
message='--color_field passed an invalid value')
logging.basicConfig(format = '[%(asctime)s] %(levelname)s: %(message)s',
level = logging.INFO)
# The RTT pixel is coloured based on the following constants. RTT values
# outside of the range described by these constants are ignored.
RTT_BLUE = 0
RTT_GREEN = 80
RTT_RED = 1000
# The packet loss pixel is coloured based on the following constants. Packet
# loss values outside of the range described are ignored.
PACKETLOSS_GREEN = 0
PACKETLOSS_RED = 1.0
# Set the GAMMA to a higher number to boost the color of high packet loss.
PACKETLOSS_GAMMA = 4.0
MAP_ARRAY = None
COLOR_FUNC = None
COLOR_VALUE = None
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def linear_projection(latitude, longitude):
return Point(longitude, latitude)
def cylindrical_equidistant_projection(latitude, longitude):
# If longitude0 is non-zero, don't forget to wrap x.
longitude0 = 0.0
phi = 0.0 # equirectangular
# phi = 0.654498469 # Miller 1
# phi = 0.750491578 # Miller 2
# phi = 0.880809496 # Miller 3
return Point((longitude - longitude0) * math.cos(phi), latitude)
def mercator_projection(latitude, longitude):
# If longitude0 is non-zero, don't forget to wrap x.
longitude0 = 0.0
latitude_rad = math.radians(latitude)
return Point(
longitude - longitude0,
math.degrees(math.log(math.tan(math.pi / 4 + latitude_rad / 2)))
)
# TODO(dominic): Choice of projection from flags
#PROJECTION_FUNC = linear_projection
PROJECTION_FUNC = cylindrical_equidistant_projection
#PROJECTION_FUNC = mercator_projection
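# Illustrative example (not part of the original script): with phi = 0.0 the
# cylindrical equidistant projection is effectively a pass-through, e.g.
#   cylindrical_equidistant_projection(45, -120) -> Point(x=-120.0, y=45)
# while the Mercator projection stretches latitude away from the equator:
#   mercator_projection(45, -120) -> Point(x=-120.0, y=~50.5)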
def get_color_for_rtt(rtt):
if rtt < RTT_BLUE or rtt > RTT_RED:
return [0, 0, 0]
elif rtt > RTT_GREEN:
col = (rtt - RTT_GREEN) / (RTT_RED - RTT_GREEN)
return [col, 1.0 - col, 0]
else:
col = (rtt - RTT_BLUE) / (RTT_GREEN - RTT_BLUE)
return [0, col, 1.0 - col]
def get_color_for_packetloss(packet_loss):
if packet_loss < PACKETLOSS_GREEN or packet_loss > PACKETLOSS_RED:
logging.warning('rejecting %.3f', packet_loss)
return [0, 0, 0]
else:
col = (packet_loss - PACKETLOSS_GREEN) / \
(PACKETLOSS_RED - PACKETLOSS_GREEN)
col = math.pow(col, 1.0 / PACKETLOSS_GAMMA)
return [col, 1.0 - col, 0.0]
COLOR_FIELDS = {
    'mean_rtt_ms': get_color_for_rtt,
    'max_rtt_ms': get_color_for_rtt,
    'packet_loss': get_color_for_packetloss,
}
def plot_item(item):
global MAP_ARRAY
logging.debug('Converting %s', item)
color_key = None
for v in item["values"]:
logging.debug(' %s', v)
if v["name"] == FLAGS.color_field:
color_key = v["value"]
logging.debug(' %s', color_key)
break
if color_key == None:
# logging.warning('No values found for item %s',
# item["device_properties"]["timestamp"])
return
location = item["device_properties"]["location"]
latitude = location["latitude"]
longitude = location["longitude"]
if longitude < -180 or longitude > 180:
logging.error('Invalid longitude %.3f', longitude)
if latitude < -90 or latitude > 90:
logging.error('Invalid latitude %.3f', latitude)
projected = PROJECTION_FUNC(latitude, longitude)
if projected.x < -180 or projected.x > 180:
logging.warn('Invalid projected longitude %.3f', projected.x)
if projected.y < -90 or projected.y > 90:
logging.warn('Invalid projected latitude %.3f', projected.y)
map_coord = Point(FLAGS.width * (projected.x / 360.0 + 0.5),
FLAGS.height * (1.0 - (projected.y / 180.0 + 0.5)))
try:
color = COLOR_FUNC(float(color_key))
# These coordinates are not reversed - rows first
logging.debug("setting %d.%d to %.2f %.2f %.2f", map_coord.y, map_coord.x,
color[0], color[1], color[2])
MAP_ARRAY[map_coord.y, map_coord.x] += color
except IndexError:
logging.error('Bad map coord: %s', pprint.pprint(map_coord))
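# Illustrative example (not part of the original script): with the default
# 1920x1440 output, a projected point at (latitude 0, longitude 0) lands at
# pixel (x=960, y=720), and (latitude 90, longitude -180) lands at (x=0, y=0),
# i.e. the top-left corner of the map.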
def main():
global COLOR_FUNC
global MAP_ARRAY
try:
FLAGS(sys.argv)
except gflags.FlagsError, err:
print '%s\nUsage: %s ARGS\n%s' % (err, sys.argv[0], FLAGS)
sys.exit(1)
# Set color_field from FLAGS
COLOR_FUNC = COLOR_FIELDS[FLAGS.color_field]
try:
year = FLAGS.start_year
month = FLAGS.start_month
while year < FLAGS.end_year or (year == FLAGS.end_year and month <= FLAGS.end_month):
# These dimensions are not reversed - number of rows is first
MAP_ARRAY = numpy.zeros((FLAGS.height, FLAGS.width, 3), dtype = numpy.float)
logging.info('Running query for %d.%d', year, month)
# Open every json file in folder data/year/month/*.json
file_contents = ""
directory = os.path.join("data",str(year).zfill(2),str(month).zfill(2))
logging.info("Checking %s", directory)
for root,dirs,files in os.walk(directory):
for file in files:
if file.endswith(".json"):
logging.info(" Opening %s", file)
f = open(os.path.join(directory, file), 'r')
for line in f:
if line == None:
break
item = json.JSONDecoder().decode(line)
# TODO: filter item against color_field.select and type
if item["type"] == FLAGS.test_type:
plot_item(item)
f.close()
# convert to image and show
# img = pilutil.toimage(MAP_ARRAY)
# img.show()
# TODO(dominic): Normalize/gamma correct on flag
MAP_ARRAY.clip(0.0, 1.0, out=MAP_ARRAY)
# save image to disk
output_name = FLAGS.output + FLAGS.test_type + '.' + \
FLAGS.color_field + '.' + str(year) + '.' + str(month).zfill(2) + '.bmp'
logging.info('Saving map to %s', output_name)
pilutil.imsave(output_name, MAP_ARRAY)
month += 1
if month > 12:
month -= 12
year += 1
except Exception as e:
logging.error(e)
sys.exit(1)
logging.info('Complete')
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Installation and deployment script."""
import os
import pkg_resources
import sys
try:
from setuptools import find_packages, setup
except ImportError:
from distutils.core import find_packages, setup
try:
from distutils.command.bdist_msi import bdist_msi
except ImportError:
bdist_msi = None
try:
from distutils.command.bdist_rpm import bdist_rpm
except ImportError:
bdist_rpm = None
try:
  from setuptools.command.sdist import sdist
except ImportError:
from distutils.command.sdist import sdist
version_tuple = (sys.version_info[0], sys.version_info[1])
if version_tuple < (3, 6):
print((
'Unsupported Python version: {0:s}, version 3.6 or higher '
'required.').format(sys.version))
sys.exit(1)
# Change PYTHONPATH to include dfvfs so that we can get the version.
sys.path.insert(0, '.')
import dfvfs # pylint: disable=wrong-import-position
if not bdist_msi:
BdistMSICommand = None
else:
class BdistMSICommand(bdist_msi):
"""Custom handler for the bdist_msi command."""
# pylint: disable=invalid-name
def run(self):
"""Builds an MSI."""
      # bdist_msi does not accept the library version (nor a date) as the MSI
      # version string, but appending a .1 suffix makes it acceptable.
self.distribution.metadata.version += '.1'
bdist_msi.run(self)
if not bdist_rpm:
BdistRPMCommand = None
else:
class BdistRPMCommand(bdist_rpm):
"""Custom handler for the bdist_rpm command."""
# pylint: disable=invalid-name
def _make_spec_file(self):
"""Generates the text of an RPM spec file.
Returns:
list[str]: lines of the RPM spec file.
"""
# Note that bdist_rpm can be an old style class.
if issubclass(BdistRPMCommand, object):
spec_file = super(BdistRPMCommand, self)._make_spec_file()
else:
spec_file = bdist_rpm._make_spec_file(self)
python_package = 'python3'
description = []
requires = ''
summary = ''
in_description = False
python_spec_file = []
for line in iter(spec_file):
if line.startswith('Summary: '):
summary = line[9:]
elif line.startswith('BuildRequires: '):
line = 'BuildRequires: {0:s}-setuptools, {0:s}-devel'.format(
python_package)
elif line.startswith('Requires: '):
requires = line[10:]
continue
elif line.startswith('%description'):
in_description = True
elif line.startswith('python setup.py build'):
if python_package == 'python3':
line = '%py3_build'
else:
line = '%py2_build'
elif line.startswith('python setup.py install'):
if python_package == 'python3':
line = '%py3_install'
else:
line = '%py2_install'
elif line.startswith('%files'):
lines = [
'%files -n {0:s}-%{{name}}'.format(python_package),
'%defattr(644,root,root,755)',
'%license LICENSE',
'%doc ACKNOWLEDGEMENTS AUTHORS README']
lines.extend([
'%{python3_sitelib}/dfvfs/*.py',
'%{python3_sitelib}/dfvfs/*/*.py',
'%{python3_sitelib}/dfvfs/*/*.yaml',
'%{python3_sitelib}/dfvfs*.egg-info/*',
'',
'%exclude %{_prefix}/share/doc/*',
'%exclude %{python3_sitelib}/dfvfs/__pycache__/*',
'%exclude %{python3_sitelib}/dfvfs/*/__pycache__/*'])
python_spec_file.extend(lines)
break
elif line.startswith('%prep'):
in_description = False
python_spec_file.append(
'%package -n {0:s}-%{{name}}'.format(python_package))
python_summary = 'Python 3 module of {0:s}'.format(summary)
if requires:
python_spec_file.append('Requires: {0:s}'.format(requires))
python_spec_file.extend([
'Summary: {0:s}'.format(python_summary),
'',
'%description -n {0:s}-%{{name}}'.format(python_package)])
python_spec_file.extend(description)
elif in_description:
        # Ignore leading blank lines in the description.
if not description and not line:
continue
description.append(line)
python_spec_file.append(line)
return python_spec_file
def parse_requirements_from_file(path):
"""Parses requirements from a requirements file.
Args:
path (str): path to the requirements file.
Returns:
list[str]: name and optional version information of the required packages.
"""
requirements = []
if os.path.isfile(path):
with open(path, 'r') as file_object:
file_contents = file_object.read()
for requirement in pkg_resources.parse_requirements(file_contents):
try:
name = str(requirement.req)
except AttributeError:
name = str(requirement)
if not name.startswith('pip '):
requirements.append(name)
return requirements
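# Illustrative example (not from the original setup.py): for a requirements.txt
# containing the single line "dfdatetime >= 20211113",
# parse_requirements_from_file() returns ['dfdatetime>=20211113']; the version
# specifier is kept and pkg_resources normalizes away the whitespace.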
dfvfs_description = (
'Digital Forensics Virtual File System (dfVFS).')
dfvfs_long_description = (
'dfVFS, or Digital Forensics Virtual File System, provides read-only '
'access to file-system objects from various storage media types and file '
'formats. The goal of dfVFS is to provide a generic interface for '
'accessing file-system objects, for which it uses several back-ends that '
'provide the actual implementation of the various storage media types, '
'volume systems and file systems.')
setup(
name='dfvfs',
version=dfvfs.__version__,
description=dfvfs_description,
long_description=dfvfs_long_description,
license='Apache License, Version 2.0',
url='https://github.com/log2timeline/dfvfs',
maintainer='Log2Timeline maintainers',
maintainer_email='log2timeline-maintainers@googlegroups.com',
cmdclass={
'bdist_msi': BdistMSICommand,
'bdist_rpm': BdistRPMCommand,
'sdist_test_data': sdist},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
packages=find_packages('.', exclude=[
'docs', 'tests', 'tests.*', 'utils']),
package_dir={
'dfvfs': 'dfvfs'
},
include_package_data=True,
package_data={
'dfvfs.lib': ['*.yaml']
},
zip_safe=False,
data_files=[
('share/doc/dfvfs', [
'ACKNOWLEDGEMENTS', 'AUTHORS', 'LICENSE', 'README']),
],
install_requires=parse_requirements_from_file('requirements.txt'),
tests_require=parse_requirements_from_file('test_requirements.txt'),
)
|
|
"""
Beacon to fire events at failed login of users
.. versionadded:: 2015.5.0
Example Configuration
=====================
.. code-block:: yaml
# Fire events on all failed logins
beacons:
btmp: []
# Matching on user name, using a default time range
beacons:
btmp:
- users:
gareth:
- defaults:
time_range:
start: '8am'
end: '4pm'
# Matching on user name, overriding the default time range
beacons:
btmp:
- users:
gareth:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
# Matching on group name, overriding the default time range
beacons:
btmp:
- groups:
users:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
Use Case: Posting Failed Login Events to Slack
==============================================
This can be done using the following reactor SLS:
.. code-block:: jinja
report-wtmp:
runner.salt.cmd:
- args:
- fun: slack.post_message
- channel: mychannel # Slack channel
- from_name: someuser # Slack user
- message: "Failed login from `{{ data.get('user', '') or 'unknown user' }}` on `{{ data['id'] }}`"
Match the event like so in the master config file:
.. code-block:: yaml
reactor:
- 'salt/beacon/*/btmp/':
- salt://reactor/btmp.sls
.. note::
This approach uses the :py:mod:`slack execution module
<salt.modules.slack_notify>` directly on the master, and therefore requires
that the master has a slack API key in its configuration:
.. code-block:: yaml
slack:
api_key: xoxb-XXXXXXXXXXXX-XXXXXXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXX
See the :py:mod:`slack execution module <salt.modules.slack_notify>`
documentation for more information. While you can use an individual user's
API key to post to Slack, a bot user is likely better suited for this. The
:py:mod:`slack engine <salt.engines.slack>` documentation has information
on how to set up a bot user.
"""
import datetime
import logging
import os
import struct
import salt.utils.beacons
import salt.utils.files
import salt.utils.stringutils
__virtualname__ = "btmp"
BTMP = "/var/log/btmp"
FMT = b"hi32s4s32s256shhiii4i20x"
FIELDS = [
"type",
"PID",
"line",
"inittab",
"user",
"hostname",
"exit_status",
"session",
"time",
"addr",
]
SIZE = struct.calcsize(FMT)
LOC_KEY = "btmp.loc"
log = logging.getLogger(__name__)
try:
import dateutil.parser as dateutil_parser
_TIME_SUPPORTED = True
except ImportError:
_TIME_SUPPORTED = False
def __virtual__():
if os.path.isfile(BTMP):
return __virtualname__
err_msg = "{} does not exist.".format(BTMP)
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg
def _validate_time_range(trange, status, msg):
"""
Check time range
"""
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = "The time_range parameter for btmp beacon must be a dictionary."
if not all(k in trange for k in ("start", "end")):
status = False
msg = (
"The time_range parameter for btmp beacon must contain start & end options."
)
return status, msg
def _gather_group_members(group, groups, users):
"""
Gather group members
"""
_group = __salt__["group.info"](group)
if not _group:
log.warning("Group %s does not exist, ignoring.", group)
return
for member in _group["members"]:
if member not in users:
users[member] = groups[group]
def _check_time_range(time_range, now):
"""
Check time range
"""
if _TIME_SUPPORTED:
_start = dateutil_parser.parse(time_range["start"])
_end = dateutil_parser.parse(time_range["end"])
return bool(_start <= now <= _end)
else:
log.error("Dateutil is required.")
return False
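# Illustrative example (assumes python-dateutil is installed, not part of the
# original module): dateutil parses '8am' and '4pm' as today's date at 08:00
# and 16:00, so with time_range = {'start': '8am', 'end': '4pm'} a failed
# login observed at 10:30 today makes _check_time_range() return True.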
def _get_loc():
"""
return the active file location
"""
if LOC_KEY in __context__:
return __context__[LOC_KEY]
def validate(config):
"""
Validate the beacon configuration
"""
vstatus = True
vmsg = "Valid beacon configuration"
    # Configuration for the btmp beacon should be a list of dicts
if not isinstance(config, list):
vstatus = False
vmsg = "Configuration for btmp beacon must be a list."
else:
config = salt.utils.beacons.list_to_dict(config)
if "users" in config:
if not isinstance(config["users"], dict):
vstatus = False
vmsg = "User configuration for btmp beacon must be a dictionary."
else:
for user in config["users"]:
_time_range = config["users"][user].get("time_range", {})
vstatus, vmsg = _validate_time_range(_time_range, vstatus, vmsg)
if not vstatus:
return vstatus, vmsg
if "groups" in config:
if not isinstance(config["groups"], dict):
vstatus = False
vmsg = "Group configuration for btmp beacon must be a dictionary."
else:
for group in config["groups"]:
_time_range = config["groups"][group].get("time_range", {})
vstatus, vmsg = _validate_time_range(_time_range, vstatus, vmsg)
if not vstatus:
return vstatus, vmsg
if "defaults" in config:
if not isinstance(config["defaults"], dict):
vstatus = False
vmsg = "Defaults configuration for btmp beacon must be a dictionary."
else:
_time_range = config["defaults"].get("time_range", {})
vstatus, vmsg = _validate_time_range(_time_range, vstatus, vmsg)
if not vstatus:
return vstatus, vmsg
return vstatus, vmsg
def beacon(config):
"""
    Read the btmp file and return information on failed logins
"""
ret = []
users = {}
groups = {}
defaults = None
for config_item in config:
if "users" in config_item:
users = config_item["users"]
if "groups" in config_item:
groups = config_item["groups"]
if "defaults" in config_item:
defaults = config_item["defaults"]
with salt.utils.files.fopen(BTMP, "rb") as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
return ret
else:
fp_.seek(loc)
while True:
now = datetime.datetime.now()
raw = fp_.read(SIZE)
if len(raw) != SIZE:
return ret
__context__[LOC_KEY] = fp_.tell()
pack = struct.unpack(FMT, raw)
event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], (str, bytes)):
if isinstance(event[field], bytes):
event[field] = salt.utils.stringutils.to_unicode(event[field])
event[field] = event[field].strip("\x00")
for group in groups:
_gather_group_members(group, groups, users)
if users:
if event["user"] in users:
_user = users[event["user"]]
if isinstance(_user, dict) and "time_range" in _user:
if _check_time_range(_user["time_range"], now):
ret.append(event)
else:
if defaults and "time_range" in defaults:
if _check_time_range(defaults["time_range"], now):
ret.append(event)
else:
ret.append(event)
else:
if defaults and "time_range" in defaults:
if _check_time_range(defaults["time_range"], now):
ret.append(event)
else:
ret.append(event)
return ret
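def _demo_unpack_record():
    """Standalone sketch, not used by Salt: decodes one synthetic btmp record.

    The packed values below are made up for illustration; the point is to show
    how a raw record maps onto the FMT/FIELDS layout defined above.
    """
    raw = struct.pack(
        FMT,
        6,  # type (LOGIN_PROCESS)
        1234,  # PID
        b"pts/0",  # line
        b"",  # inittab
        b"alice",  # user
        b"198.51.100.7",  # hostname
        0,  # exit_status
        0,  # session
        1672531200,  # time (epoch seconds)
        0,  # addr
        0,  # unused
        0, 0, 0, 0,  # trailing address words (not named in FIELDS)
    )
    # The string fields come back NUL-padded; beacon() strips that padding.
    return dict(zip(FIELDS, struct.unpack(FMT, raw)))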
|
|
"""Subclass of MainFrameBase, which is generated by wxFormBuilder."""
from multiprocessing import synchronize
import string
import wx
import pickle
import re
import base64
import os
import threading
import mst_gui
from threaded import *
import traced
import LogViewer
import LogEntry
import LogManager
from ExperimentServerTester import IExperimentServerTesterNotifier
from results import *
# Implementing MainFrameBase
class MainFrame(mst_gui.MainFrameBase, IExperimentServerTesterNotifier):
def __init__(self, parent, experiment_server_tester):
mst_gui.MainFrameBase.__init__(self, parent)
self.mMST = experiment_server_tester
self.mMST.Notifier = self
self.ResizeLog(self.mLogGrid.GetSize(), 0.5, 0.5)
self.mStatusBar.SetStatusText("Test", 0)
self.Status = "ExperimentServerTester"
self.mControlsPersistence = {}
self.mLogManager = LogManager.LogManager()
self.LoadControlContents()
self.ControlsEnable(True, False, True)
EVT_RESULT(self, self.OnResult)
def OnResult(self, event):
"""
Called whenever a worker thread provides the main loop with a result.
"""
event.data()
def SetStatus(self, text):
self.mStatusBar.SetStatusText(text, 0)
def GetStatus(self):
return self.mStatusBar.GetStatusText(0)
Status = property(GetStatus, SetStatus)
def SaveControlContents(self):
# BUG: Currently wxFilePicker does not work on Linux, so we're using a text field and a button instead.
host = self.mHostText.GetValue()
port = self.mPortText.GetValue()
uri = self.mUriText.GetValue()
#script_file = self.mScriptFilePicker.GetTextCtrlValue()
script_file = self.mScriptPickerPath.GetValue()
#sendfile = self.mFilePicker.GetTextCtrlValue()
        sendfile = self.mFilePickerPath.GetValue()
self.mControlsPersistence["host"] = host
self.mControlsPersistence["port"] = port
self.mControlsPersistence["scriptfile"] = script_file
self.mControlsPersistence["sendfile"] = sendfile
self.mControlsPersistence["uri"] = uri
pickle.dump(self.mControlsPersistence, file("persistence.dat", "w"))
def LoadControlContents(self):
try:
dic = pickle.load(file("persistence.dat", "r"))
except Exception, e:
return
try:
if dic is not None:
self.mControlsPersistence = dic
self.mHostText.SetValue(self.mControlsPersistence["host"])
self.mPortText.SetValue(self.mControlsPersistence["port"])
self.mUriText.SetValue(self.mControlsPersistence["uri"])
#self.mScriptFilePicker.SetPath(self.mControlsPersistence["scriptfile"])
self.mScriptPickerPath.SetValue(self.mControlsPersistence["scriptfile"])
#self.mFilePicker.SetPath(self.mControlsPersistence["sendfile"])
self.mFilePickerPath.SetValue(self.mControlsPersistence["sendfile"])
except Exception, e:
wx.MessageBox("Possibly corrupt persistence.dat. Removing it. Error: %s" % str(e))
os.remove("persistence.dat")
# Handlers for MainFrameBase events.
def OnActivate(self, event):
pass
#
def OnActivateApp(self, event):
pass
def OnPaint(self, event):
mst_gui.MainFrameBase.OnPaint(self, event)
def OnClose(self, event):
self.SaveControlContents()
mst_gui.MainFrameBase.OnClose(self, event)
#
# def OnIconize( self, event ):
# # TODO: Implement OnIconize
# pass
#
def OnIdle(self, event):
pass
def OnEnterWindow(self, event):
pass
#
# def OnHostText( self, event ):
# # TODO: Implement OnHostText
# pass
#
# def OnHostTextEnter( self, event ):
# # TODO: Implement OnHostTextEnter
# pass
#
def OnPortText( self, event ):
event.Skip()
#
# def OnPortTextEnter( self, event ):
# # TODO: Implement OnPortTextEnter
# pass
#
def OnConnect(self, event):
self.SaveControlContents()
host = self.mHostText.GetValue()
if host == "":
wx.MessageBox("Host may not be empty", "Error")
return
port = self.mPortText.GetValue()
if port == "":
wx.MessageBox("Port may not be empty", "Error")
return
uri = self.mUriText.GetValue()
if(not self.mMST.is_connected()):
self.ControlsEnable(False, False, False)
self.mMST.connect_t(host, port, uri)
else:
self.mMST.disconnect()
self.Status = "Disconnected from %s : %s" % (host, port)
def Log(self, sent, recv):
with self.mLogManager.mLock:
entry = self.mLogManager.create_entry(sent, recv)
self.mLogGrid.AppendRows(1, True)
last_row = self.mLogGrid.GetNumberRows() - 1
self.mLogGrid.SetCellValue(last_row, 0, entry.ShortSent)
self.mLogGrid.SetCellValue(last_row, 1, entry.ShortReceived)
self.mLogGrid.MakeCellVisible(last_row, 0)
#
# def OnFileChanged( self, event ):
# # TODO: Implement OnFileChanged
# pass
#
# def OnFileInfoText( self, event ):
# # TODO: Implement OnFileInfoText
# pass
#
# def OnFileInfoTextEnter( self, event ):
# # TODO: Implement OnFileInfoTextEnter
# pass
#
# def OnCommandText( self, event ):
# # TODO: Implement OnCommandText
# pass
#
# def OnCommandTextEnter( self, event ):
# # TODO: Implement OnCommandTextEnter
# pass
def OnPortChar(self, event):
event.Skip()
# keycode = event.GetKeyCode()
# if keycode < 255 and not chr(keycode).isalpha():
# event.Skip()
def OnStartExperiment(self, event):
self.mMST.start_experiment_t()
def OnSendFile(self, event):
try:
self.SaveControlContents()
#path = self.mFilePicker.GetTextCtrlValue()
path = self.mFilePickerPath.GetValue()
if path == "":
wx.MessageBox("A file must be chosen", "Error")
return
if not os.path.exists(path):
wx.MessageBox("The specified file does not seem to exist", "Error")
return
content = file(path, u"rb").read()
info = self.mFileInfoText.GetValue()
self.mMST.send_file_t(content, info)
except Exception, e:
wx.MessageBox("Could not send file", "Error")
return
#self.Log("{Send File}\n[File Info]\n\n%s\n\n[Content]\n\n: %s" % (info, content), result)
def OnSendCommand(self, event):
cmd = self.mCommandText.GetValue()
self.mMST.send_command_t(cmd)
def OnDispose(self, event):
self.mMST.dispose_t()
# def OnScriptFileChanged( self, event ):
# # TODO: Implement OnScriptFileChanged
# pass
#
def OnRunScript(self, event):
try:
self.SaveControlContents()
script_file = self.mScriptPickerPath.GetValue()
#script_file = self.mScriptFilePicker.GetTextCtrlValue()
if(script_file == ""):
wx.MessageBox("A script file must be chosen", "Error")
return
if(not os.path.exists(script_file)):
wx.MessageBox("The specified script file does not seem to exist", "Error")
return
self.mMST.run_script_t(script_file)
except Exception, e:
wx.MessageBox("Error running script file: %s" % str(e), "Error")
#
# def OnLogCellChange( self, event ):
# # TODO: Implement OnLogCellChange
# pass
#
def OnLogCellLeftClick(self, event):
with self.mLogManager.mLock:
mst_gui.MainFrameBase.OnLogCellLeftClick(self, event)
# We calculate the id of the entry behind the clicked column, knowing
# that ids are assigned in an increasing order starting from 1
next_id = self.mLogManager.get_next_id()
id = event.GetRow() + 1
entry = self.mLogManager.get_entry_by_id(id)
dlg = LogViewer.LogViewer(self, entry)
dlg.ShowModal()
#
# def OnLogCellRightClick( self, event ):
# # TODO: Implement OnLogCellRightClick
# pass
#
# def OnLogSelectCell( self, event ):
# # TODO: Implement OnLogSelectCell
# pass
#
# def OnLogKillFocus( self, event ):
# # TODO: Implement OnLogKillFocus
# pass
#
# def OnLogPaint( self, event ):
# # TODO: Implement OnLogPaint
# pass
#
# def OnLogSetFocus( self, event ):
# # TODO: Implement OnLogSetFocus
# pass
#
def OnLogSize(self, event):
self.ResizeLog(event.GetSize(), 0.5, 0.5)
def ResizeLog(self, size, sent_percent, recv_percent):
grid = self.mLogGrid
grid.SetRowLabelSize(50);
total_width = size.GetWidth() - grid.GetScrollThumb(wx.VERTICAL)
width_cells = total_width - 50 - 0
sent_width = width_cells * sent_percent
recv_width = width_cells * recv_percent
grid.SetColSize(0, sent_width)
grid.SetColSize(1, recv_width)
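    # Illustrative note (not from the original source): ResizeLog above reserves
    # 50 px for the row labels plus the vertical scrollbar thumb and splits the
    # rest between the two columns. For an 850 px wide grid with no scrollbar,
    # width_cells is 800, so the default 0.5/0.5 split gives each column 400 px.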
#
# def OnLogUpdateUI( self, event ):
# # TODO: Implement OnLogUpdateUI
# pass
#
def OnCleanLog(self, event):
with self.mLogManager.mLock:
cols = self.mLogGrid.GetNumberRows()
if cols != 0:
self.mLogGrid.DeleteRows(0, cols, True)
self.mLogManager.clear()
def ControlsEnable(self, connection = True, commands = False, connect_button = True):
# TODO: Consider replacing it back once the wxFilePicker works on Linux.
#self.mFilePicker.Enable(commands)
self.mFilePickerButton.Enable(commands)
self.mFilePickerPath.Enable(commands);
self.mFileInfoText.Enable(commands)
self.mStartExperimentButton.Enable(commands)
self.mCommandText.Enable(commands)
self.mSendFileButton.Enable(commands)
self.mDisposeButton.Enable(commands)
self.mSendCommand.Enable(commands)
self.mHostText.Enable(connection)
self.mPortText.Enable(connection)
self.mConnectButton.Enable(connect_button)
self.mUriText.Enable(connection)
# IExperimentServerTesterNotifier overrides
def ConnectNotification(self, addr, port, result):
if result:
self.Log("{Connect to %s:%s}" % (addr, port), "{established}")
self.mConnectButton.SetLabel("Disconnect")
            self.ControlsEnable(False, True, True)
            self.Status = "Connected to %s : %s" % (addr, port)
else:
self.ControlsEnable(True, False, True)
self.Log("{Connect to %s:%s}" % (addr, port), "{FAILED}")
wx.MessageBox("Could not establish connection", "Error")
def StartExperimentNotification(self, result):
self.Status = "Start Experiment sent. (Response: %s)" % result
self.Log("{Start Experiment}", result)
def SendCommandNotification(self, cmd, result):
self.Log("{Command} %s" % cmd, result)
self.Status = "Command sent. (Response: %s)" % result
def SendFileNotification(self, file_content, file_info, result):
try:
decoded = base64.b64decode(file_content)
except Exception, e:
decoded = "Could not decode"
decoded = filter(lambda x: x in string.printable, decoded)
file_content = filter(lambda x: x in string.printable, file_content)
print "Decoded: ", decoded
try:
self.Log("{Send File}\n[File Info]\n\n%s\n\n[Decoded Content]:\n\n%s\n\n[Encoded Content]:\n\n%s" %
(file_info, decoded, file_content), result)
self.Status = "File sent. Response: %s" % result
except Exception, e:
self.Status = "Error on File Notification"
def DisposeNotification(self, result):
self.Log("{Dispose}", result)
self.Status = "Dispose sent. (Response: %s)" % result
def DisconnectNotification(self):
self.Log("{Disconnected}", "")
self.ControlsEnable(True, False, True)
self.mConnectButton.SetLabel("Connect")
def OnFilePickerButtonClicked( self, event ):
filename = wx.FileSelector( u"Choose the file" )
if len(filename) > 0:
self.mFilePickerPath.SetValue(filename)
def OnScriptPickerButtonClicked( self, event ):
filename = wx.FileSelector(u"Choose the script file", "", "", ".py", "*.py")
if len(filename) > 0:
self.mScriptPickerPath.SetValue(filename)
|
|
# most of this code was politely stolen from https://github.com/berkeleydeeprlcourse/homework/
# all credit goes to https://github.com/abhishekunique
# (if I got the author right)
import numpy as np
from gym.utils import seeding
try:
from graphviz import Digraph
import graphviz
has_graphviz = True
except ImportError:
has_graphviz = False
class MDP:
def __init__(self, transition_probs, rewards, initial_state=None, seed=None):
"""
Defines an MDP. Compatible with gym Env.
:param transition_probs: transition_probs[s][a][s_next] = P(s_next | s, a)
A dict[state -> dict] of dicts[action -> dict] of dicts[next_state -> prob]
For each state and action, probabilities of next states should sum to 1
If a state has no actions available, it is considered terminal
:param rewards: rewards[s][a][s_next] = r(s,a,s')
A dict[state -> dict] of dicts[action -> dict] of dicts[next_state -> reward]
The reward for anything not mentioned here is zero.
        :param initial_state: a state where the agent starts, or a callable() -> state
By default, picks initial state at random.
States and actions can be anything you can use as dict keys, but we recommend that you use strings or integers
        Here's an example of the MDP depicted at http://bit.ly/2jrNHNr
transition_probs = {
's0': {
'a0': {'s0': 0.5, 's2': 0.5},
'a1': {'s2': 1}
},
's1': {
'a0': {'s0': 0.7, 's1': 0.1, 's2': 0.2},
'a1': {'s1': 0.95, 's2': 0.05}
},
's2': {
'a0': {'s0': 0.4, 's2': 0.6},
'a1': {'s0': 0.3, 's1': 0.3, 's2': 0.4}
}
}
rewards = {
's1': {'a0': {'s0': +5}},
's2': {'a1': {'s0': -1}}
}
"""
self._check_param_consistency(transition_probs, rewards)
self._transition_probs = transition_probs
self._rewards = rewards
self._initial_state = initial_state
self.n_states = len(transition_probs)
        # Seed the RNG before reset(), since reset() may sample a random initial state.
        self.np_random, _ = seeding.np_random(seed)
        self.reset()
def get_all_states(self):
""" return a tuple of all possiblestates """
return tuple(self._transition_probs.keys())
def get_possible_actions(self, state):
""" return a tuple of possible actions in a given state """
return tuple(self._transition_probs.get(state, {}).keys())
def is_terminal(self, state):
""" return True if state is terminal or False if it isn't """
return len(self.get_possible_actions(state)) == 0
def get_next_states(self, state, action):
""" return a dictionary of {next_state1 : P(next_state1 | state, action), next_state2: ...} """
assert action in self.get_possible_actions(state), "cannot do action %s from state %s" % (action, state)
return self._transition_probs[state][action]
def get_transition_prob(self, state, action, next_state):
""" return P(next_state | state, action) """
return self.get_next_states(state, action).get(next_state, 0.0)
def get_reward(self, state, action, next_state):
""" return the reward you get for taking action in state and landing on next_state"""
assert action in self.get_possible_actions(state), "cannot do action %s from state %s" % (action, state)
return self._rewards.get(state, {}).get(action, {}).get(next_state, 0.0)
def reset(self):
""" reset the game, return the initial state"""
if self._initial_state is None:
self._current_state = self.np_random.choice(
tuple(self._transition_probs.keys()))
elif self._initial_state in self._transition_probs:
self._current_state = self._initial_state
elif callable(self._initial_state):
self._current_state = self._initial_state()
else:
raise ValueError(
"initial state %s should be either a state or a function() -> state" % self._initial_state)
return self._current_state
def step(self, action):
""" take action, return next_state, reward, is_done, empty_info """
possible_states, probs = zip(*self.get_next_states(self._current_state, action).items())
next_state = possible_states[self.np_random.choice(np.arange(len(possible_states)), p=probs)]
reward = self.get_reward(self._current_state, action, next_state)
is_done = self.is_terminal(next_state)
self._current_state = next_state
return next_state, reward, is_done, {}
def render(self):
print("Currently at %s" % self._current_state)
def _check_param_consistency(self, transition_probs, rewards):
for state in transition_probs:
assert isinstance(transition_probs[state], dict), \
"transition_probs for %s should be a dictionary but is instead %s" % (
state, type(transition_probs[state]))
for action in transition_probs[state]:
assert isinstance(transition_probs[state][action], dict), \
"transition_probs for %s, %s should be a a dictionary but is instead %s" % (
state, action, type(transition_probs[state][action]))
next_state_probs = transition_probs[state][action]
assert len(next_state_probs) != 0, "from state %s action %s leads to no next states" % (state, action)
sum_probs = sum(next_state_probs.values())
assert abs(sum_probs - 1) <= 1e-10, \
"next state probabilities for state %s action %s add up to %f (should be 1)" % (
state, action, sum_probs)
for state in rewards:
assert isinstance(rewards[state], dict), \
"rewards for %s should be a dictionary but is instead %s" % (
state, type(rewards[state]))
for action in rewards[state]:
assert isinstance(rewards[state][action], dict), \
"rewards for %s, %s should be a a dictionary but is instead %s" % (
state, action, type(rewards[state][action]))
msg = "The Enrichment Center once again reminds you that Android Hell is a real place where" \
" you will be sent at the first sign of defiance."
assert None not in transition_probs, "please do not use None as a state identifier. " + msg
assert None not in rewards, "please do not use None as an action identifier. " + msg
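# Illustrative usage sketch (not part of the original module; the helper name and the
# "always take the first listed action" policy are arbitrary choices for demonstration).
# It shows how the transition/reward dictionaries documented in MDP.__init__ drive a rollout.
def _demo_rollout(mdp, n_steps=10):
    """Roll the MDP forward for up to n_steps with a trivial policy; return the total reward."""
    total_reward = 0.0
    state = mdp.reset()
    for _ in range(n_steps):
        actions = mdp.get_possible_actions(state)
        if not actions:  # terminal state: no actions available
            break
        state, reward, done, _ = mdp.step(actions[0])
        total_reward += reward
        if done:
            break
    return total_reward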
class FrozenLakeEnv(MDP):
"""
Winter is here. You and your friends were tossing around a frisbee at the park
when you made a wild throw that left the frisbee out in the middle of the lake.
The water is mostly frozen, but there are a few holes where the ice has melted.
If you step into one of those holes, you'll fall into the freezing water.
At this time, there's an international frisbee shortage, so it's absolutely imperative that
you navigate across the lake and retrieve the disc.
However, the ice is slippery, so you won't always move in the direction you intend.
The surface is described using a grid like the following
SFFF
FHFH
FFFH
HFFG
S : starting point, safe
F : frozen surface, safe
H : hole, fall to your doom
G : goal, where the frisbee is located
The episode ends when you reach the goal or fall in a hole.
You receive a reward of 1 if you reach the goal, and zero otherwise.
"""
MAPS = {
"4x4": [
"SFFF",
"FHFH",
"FFFH",
"HFFG"
],
"8x8": [
"SFFFFFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG"
],
}
def __init__(self, desc=None, map_name="4x4", slip_chance=0.2, seed=None):
if desc is None and map_name is None:
raise ValueError('Must provide either desc or map_name')
elif desc is None:
desc = self.MAPS[map_name]
assert ''.join(desc).count(
'S') == 1, "this implementation supports having exactly one initial state"
assert all(c in "SFHG" for c in
''.join(desc)), "all cells must be either of S, F, H or G"
self.desc = desc = np.asarray(list(map(list, desc)), dtype='str')
self.lastaction = None
nrow, ncol = desc.shape
states = [(i, j) for i in range(nrow) for j in range(ncol)]
actions = ["left", "down", "right", "up"]
        initial_state = states[np.array(desc == 'S').ravel().argmax()]
def move(row, col, movement):
if movement == 'left':
col = max(col - 1, 0)
elif movement == 'down':
row = min(row + 1, nrow - 1)
elif movement == 'right':
col = min(col + 1, ncol - 1)
elif movement == 'up':
row = max(row - 1, 0)
else:
raise ("invalid action")
return (row, col)
transition_probs = {s: {} for s in states}
rewards = {s: {} for s in states}
for (row, col) in states:
if desc[row, col] in "GH":
continue
for action_i in range(len(actions)):
action = actions[action_i]
transition_probs[(row, col)][action] = {}
rewards[(row, col)][action] = {}
for movement_i in [(action_i - 1) % len(actions), action_i,
(action_i + 1) % len(actions)]:
movement = actions[movement_i]
newrow, newcol = move(row, col, movement)
prob = (1. - slip_chance) if movement == action else (
slip_chance / 2.)
if prob == 0:
continue
if (newrow, newcol) not in transition_probs[row, col][
action]:
transition_probs[row, col][action][
newrow, newcol] = prob
else:
transition_probs[row, col][action][
newrow, newcol] += prob
if desc[newrow, newcol] == 'G':
rewards[row, col][action][newrow, newcol] = 1.0
MDP.__init__(self, transition_probs, rewards, initial_state, seed)
def render(self):
desc_copy = np.copy(self.desc)
desc_copy[self._current_state] = '*'
print('\n'.join(map(''.join, desc_copy)), end='\n\n')
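# Illustrative usage sketch (not part of the original module; the episode cap of 100 steps
# and the uniformly random policy are arbitrary choices for demonstration). It exercises the
# map and reward semantics described in the FrozenLakeEnv docstring above.
def _demo_random_episode(seed=0):
    """Play one random episode on the default 4x4 map; return the final reward (1.0 on goal)."""
    env = FrozenLakeEnv(map_name="4x4", slip_chance=0.2, seed=seed)
    state = env.reset()
    reward = 0.0
    for _ in range(100):
        actions = env.get_possible_actions(state)
        action = actions[env.np_random.choice(len(actions))]
        state, reward, done, _ = env.step(action)
        if done:
            break
    return reward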
def plot_graph(mdp, graph_size='10,10', s_node_size='1,5',
a_node_size='0,5', rankdir='LR', ):
"""
Function for pretty drawing MDP graph with graphviz library.
Requirements:
graphviz : https://www.graphviz.org/
for ubuntu users: sudo apt-get install graphviz
python library for graphviz
for pip users: pip install graphviz
:param mdp:
:param graph_size: size of graph plot
:param s_node_size: size of state nodes
:param a_node_size: size of action nodes
:param rankdir: order for drawing
:return: dot object
"""
s_node_attrs = {'shape': 'doublecircle',
'color': '#85ff75',
'style': 'filled',
'width': str(s_node_size),
'height': str(s_node_size),
'fontname': 'Arial',
'fontsize': '24'}
a_node_attrs = {'shape': 'circle',
'color': 'lightpink',
'style': 'filled',
'width': str(a_node_size),
'height': str(a_node_size),
'fontname': 'Arial',
'fontsize': '20'}
s_a_edge_attrs = {'style': 'bold',
'color': 'red',
'ratio': 'auto'}
a_s_edge_attrs = {'style': 'dashed',
'color': 'blue',
'ratio': 'auto',
'fontname': 'Arial',
'fontsize': '16'}
graph = Digraph(name='MDP')
graph.attr(rankdir=rankdir, size=graph_size)
for state_node in mdp._transition_probs:
graph.node(state_node, **s_node_attrs)
        for possible_action in mdp.get_possible_actions(state_node):
            action_node = state_node + "-" + possible_action
            graph.node(action_node,
                       label=str(possible_action),
                       **a_node_attrs)
            graph.edge(state_node, action_node, **s_a_edge_attrs)
            for possible_next_state in mdp.get_next_states(state_node, possible_action):
                probability = mdp.get_transition_prob(
                    state_node, possible_action, possible_next_state)
                reward = mdp.get_reward(
                    state_node, possible_action, possible_next_state)
                if reward != 0:
                    label_a_s_edge = 'p = ' + str(probability) + \
                                     ' ' + 'reward =' + str(reward)
                else:
                    label_a_s_edge = 'p = ' + str(probability)
                graph.edge(action_node, possible_next_state,
                           label=label_a_s_edge, **a_s_edge_attrs)
return graph
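# Illustrative usage sketch (not part of the original module; the output file name
# 'mdp_graph' and the svg format are arbitrary choices for demonstration). plot_graph needs
# the optional graphviz dependency listed in its docstring, so this helper guards on
# has_graphviz instead of failing with a NameError on Digraph.
def _render_mdp_graph(mdp, filename='mdp_graph'):
    """Render the MDP graph to <filename>.svg if graphviz is available; return the Digraph or None."""
    if not has_graphviz:
        return None
    graph = plot_graph(mdp)
    graph.render(filename, format='svg', cleanup=True)
    return graph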
def plot_graph_with_state_values(mdp, state_values):
""" Plot graph with state values"""
graph = plot_graph(mdp)
for state_node in mdp._transition_probs:
value = state_values[state_node]
graph.node(state_node, label=str(state_node) + '\n' + 'V =' + str(value)[:4])
return graph
def get_optimal_action_for_plot(mdp, state_values, state, get_action_value, gamma=0.9):
""" Finds optimal action using formula above. """
if mdp.is_terminal(state):
return None
next_actions = mdp.get_possible_actions(state)
q_values = [get_action_value(mdp, state_values, state, action, gamma) for action in next_actions]
optimal_action = next_actions[np.argmax(q_values)]
return optimal_action
def plot_graph_optimal_strategy_and_state_values(mdp, state_values, get_action_value, gamma=0.9):
""" Plot graph with state values and """
graph = plot_graph(mdp)
opt_s_a_edge_attrs = {'style': 'bold',
'color': 'green',
'ratio': 'auto',
'penwidth': '6'}
for state_node in mdp._transition_probs:
value = state_values[state_node]
graph.node(state_node, label=str(state_node) + '\n' + 'V =' + str(value)[:4])
for action in mdp.get_possible_actions(state_node):
if action == get_optimal_action_for_plot(mdp, state_values, state_node, get_action_value, gamma):
graph.edge(state_node, state_node + "-" + action, **opt_s_a_edge_attrs)
return graph
|
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import mock
from oslo_utils import timeutils
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects
_TS_NOW = timeutils.utcnow(with_timezone=True)
# o.vo.fields.DateTimeField converts to a tz-aware datetime and in the
# process we lose microsecond resolution.
_TS_NOW = _TS_NOW.replace(microsecond=0)
_DB_UUID = str(uuid.uuid4())
_INST_GROUP_DB = {
'id': 1,
'uuid': _DB_UUID,
'user_id': 'fake_user',
'project_id': 'fake_project',
'name': 'fake_name',
'policies': ['policy1', 'policy2'],
'members': ['instance_id1', 'instance_id2'],
'deleted': False,
'created_at': _TS_NOW,
'updated_at': _TS_NOW,
'deleted_at': None,
}
class _TestInstanceGroupObject(object):
@mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
def test_get_by_uuid(self, mock_db_get):
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx,
_DB_UUID)
mock_db_get.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
self.assertEqual(_INST_GROUP_DB['members'], obj.members)
self.assertEqual(_INST_GROUP_DB['policies'], obj.policies)
self.assertEqual(_DB_UUID, obj.uuid)
self.assertEqual(_INST_GROUP_DB['project_id'], obj.project_id)
self.assertEqual(_INST_GROUP_DB['user_id'], obj.user_id)
self.assertEqual(_INST_GROUP_DB['name'], obj.name)
@mock.patch('nova.db.instance_group_get_by_instance',
return_value=_INST_GROUP_DB)
def test_get_by_instance_uuid(self, mock_db_get):
objects.InstanceGroup.get_by_instance_uuid(
mock.sentinel.ctx, mock.sentinel.instance_uuid)
mock_db_get.assert_called_once_with(
mock.sentinel.ctx, mock.sentinel.instance_uuid)
@mock.patch('nova.db.instance_group_get')
def test_refresh(self, mock_db_get):
changed_group = copy.deepcopy(_INST_GROUP_DB)
changed_group['name'] = 'new_name'
mock_db_get.side_effect = [_INST_GROUP_DB, changed_group]
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx,
_DB_UUID)
self.assertEqual(_INST_GROUP_DB['name'], obj.name)
obj.refresh()
self.assertEqual('new_name', obj.name)
self.assertEqual(set([]), obj.obj_what_changed())
@mock.patch('nova.compute.utils.notify_about_server_group_update')
@mock.patch('nova.db.instance_group_update')
@mock.patch('nova.db.instance_group_get')
def test_save(self, mock_db_get, mock_db_update, mock_notify):
changed_group = copy.deepcopy(_INST_GROUP_DB)
changed_group['name'] = 'new_name'
mock_db_get.side_effect = [_INST_GROUP_DB, changed_group]
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx,
_DB_UUID)
self.assertEqual(obj.name, 'fake_name')
obj.name = 'new_name'
obj.policies = ['policy1'] # Remove policy 2
obj.members = ['instance_id1'] # Remove member 2
obj.save()
mock_db_update.assert_called_once_with(mock.sentinel.ctx, _DB_UUID,
{'name': 'new_name',
'members': ['instance_id1'],
'policies': ['policy1']})
mock_notify.assert_called_once_with(mock.sentinel.ctx, "update",
{'name': 'new_name',
'members': ['instance_id1'],
'policies': ['policy1'],
'server_group_id': _DB_UUID})
@mock.patch('nova.compute.utils.notify_about_server_group_update')
@mock.patch('nova.db.instance_group_update')
@mock.patch('nova.db.instance_group_get')
def test_save_without_hosts(self, mock_db_get, mock_db_update,
mock_notify):
mock_db_get.side_effect = [_INST_GROUP_DB, _INST_GROUP_DB]
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
obj.hosts = ['fake-host1']
self.assertRaises(exception.InstanceGroupSaveException,
obj.save)
# make sure that we can save by removing hosts from what is updated
obj.obj_reset_changes(['hosts'])
obj.save()
# since hosts was the only update, there is no actual call
self.assertFalse(mock_db_update.called)
self.assertFalse(mock_notify.called)
@mock.patch('nova.compute.utils.notify_about_server_group_update')
@mock.patch('nova.db.instance_group_create', return_value=_INST_GROUP_DB)
def test_create(self, mock_db_create, mock_notify):
obj = objects.InstanceGroup(context=mock.sentinel.ctx)
obj.uuid = _DB_UUID
obj.name = _INST_GROUP_DB['name']
obj.user_id = _INST_GROUP_DB['user_id']
obj.project_id = _INST_GROUP_DB['project_id']
obj.members = _INST_GROUP_DB['members']
obj.policies = _INST_GROUP_DB['policies']
obj.updated_at = _TS_NOW
obj.created_at = _TS_NOW
obj.deleted_at = None
obj.deleted = False
obj.create()
mock_db_create.assert_called_once_with(
mock.sentinel.ctx,
{'uuid': _DB_UUID,
'name': _INST_GROUP_DB['name'],
'user_id': _INST_GROUP_DB['user_id'],
'project_id': _INST_GROUP_DB['project_id'],
'created_at': _TS_NOW,
'updated_at': _TS_NOW,
'deleted_at': None,
'deleted': False,
},
members=_INST_GROUP_DB['members'],
policies=_INST_GROUP_DB['policies'])
mock_notify.assert_called_once_with(
mock.sentinel.ctx, "create",
{'uuid': _DB_UUID,
'name': _INST_GROUP_DB['name'],
'user_id': _INST_GROUP_DB['user_id'],
'project_id': _INST_GROUP_DB['project_id'],
'created_at': _TS_NOW,
'updated_at': _TS_NOW,
'deleted_at': None,
'deleted': False,
'members': _INST_GROUP_DB['members'],
'policies': _INST_GROUP_DB['policies'],
'server_group_id': _DB_UUID})
self.assertRaises(exception.ObjectActionError, obj.create)
@mock.patch('nova.compute.utils.notify_about_server_group_update')
@mock.patch('nova.db.instance_group_delete')
def test_destroy(self, mock_db_delete, mock_notify):
obj = objects.InstanceGroup(context=mock.sentinel.ctx)
obj.uuid = _DB_UUID
obj.destroy()
mock_db_delete.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
mock_notify.assert_called_once_with(mock.sentinel.ctx, "delete",
{'server_group_id': _DB_UUID})
@mock.patch('nova.compute.utils.notify_about_server_group_update')
@mock.patch('nova.db.instance_group_members_add')
def test_add_members(self, mock_members_add_db, mock_notify):
mock_members_add_db.return_value = [mock.sentinel.members]
members = objects.InstanceGroup.add_members(mock.sentinel.ctx,
_DB_UUID,
mock.sentinel.members)
self.assertEqual([mock.sentinel.members], members)
mock_members_add_db.assert_called_once_with(
mock.sentinel.ctx,
_DB_UUID,
mock.sentinel.members)
mock_notify.assert_called_once_with(
mock.sentinel.ctx, "addmember",
{'instance_uuids': mock.sentinel.members,
'server_group_id': _DB_UUID})
@mock.patch('nova.objects.InstanceList.get_by_filters')
@mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
def test_count_members_by_user(self, mock_get_db, mock_il_get):
mock_il_get.return_value = [mock.ANY]
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
expected_filters = {
'uuid': ['instance_id1', 'instance_id2'],
'user_id': 'fake_user',
'deleted': False
}
self.assertEqual(1, obj.count_members_by_user('fake_user'))
mock_il_get.assert_called_once_with(mock.sentinel.ctx,
filters=expected_filters)
@mock.patch('nova.objects.InstanceList.get_by_filters')
@mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
def test_get_hosts(self, mock_get_db, mock_il_get):
mock_il_get.return_value = [objects.Instance(host='host1'),
objects.Instance(host='host2'),
objects.Instance(host=None)]
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
hosts = obj.get_hosts()
self.assertEqual(['instance_id1', 'instance_id2'], obj.members)
expected_filters = {
'uuid': ['instance_id1', 'instance_id2'],
'deleted': False
}
mock_il_get.assert_called_once_with(mock.sentinel.ctx,
filters=expected_filters)
self.assertEqual(2, len(hosts))
self.assertIn('host1', hosts)
self.assertIn('host2', hosts)
# Test manual exclusion
mock_il_get.reset_mock()
hosts = obj.get_hosts(exclude=['instance_id1'])
expected_filters = {
'uuid': set(['instance_id2']),
'deleted': False
}
mock_il_get.assert_called_once_with(mock.sentinel.ctx,
filters=expected_filters)
@mock.patch('nova.db.instance_group_get', return_value=_INST_GROUP_DB)
def test_obj_make_compatible(self, mock_db_get):
obj = objects.InstanceGroup.get_by_uuid(mock.sentinel.ctx, _DB_UUID)
obj_primitive = obj.obj_to_primitive()
self.assertNotIn('metadetails', obj_primitive)
obj.obj_make_compatible(obj_primitive, '1.6')
self.assertEqual({}, obj_primitive['metadetails'])
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def test_load_hosts(self, mock_get_by_filt):
mock_get_by_filt.return_value = [objects.Instance(host='host1'),
objects.Instance(host='host2')]
obj = objects.InstanceGroup(mock.sentinel.ctx, members=['uuid1'])
self.assertEqual(2, len(obj.hosts))
self.assertIn('host1', obj.hosts)
self.assertIn('host2', obj.hosts)
self.assertNotIn('hosts', obj.obj_what_changed())
def test_load_anything_else_but_hosts(self):
obj = objects.InstanceGroup(mock.sentinel.ctx)
self.assertRaises(exception.ObjectActionError, getattr, obj, 'members')
class TestInstanceGroupObject(test_objects._LocalTest,
_TestInstanceGroupObject):
pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
_TestInstanceGroupObject):
pass
def _mock_db_list_get(*args):
instances = [(str(uuid.uuid4()), 'f1', 'p1'),
(str(uuid.uuid4()), 'f2', 'p1'),
(str(uuid.uuid4()), 'f3', 'p2'),
(str(uuid.uuid4()), 'f4', 'p2')]
result = []
for instance in instances:
values = copy.deepcopy(_INST_GROUP_DB)
values['uuid'] = instance[0]
values['name'] = instance[1]
values['project_id'] = instance[2]
result.append(values)
return result
class _TestInstanceGroupListObject(object):
@mock.patch('nova.db.instance_group_get_all')
def test_list_all(self, mock_db_get):
mock_db_get.side_effect = _mock_db_list_get
inst_list = objects.InstanceGroupList.get_all(mock.sentinel.ctx)
self.assertEqual(4, len(inst_list.objects))
mock_db_get.assert_called_once_with(mock.sentinel.ctx)
@mock.patch('nova.db.instance_group_get_all_by_project_id')
def test_list_by_project_id(self, mock_db_get):
mock_db_get.side_effect = _mock_db_list_get
objects.InstanceGroupList.get_by_project_id(
mock.sentinel.ctx, mock.sentinel.project_id)
mock_db_get.assert_called_once_with(
mock.sentinel.ctx, mock.sentinel.project_id)
@mock.patch('nova.db.instance_group_get_all_by_project_id')
def test_get_by_name(self, mock_db_get):
mock_db_get.side_effect = _mock_db_list_get
# Need the project_id value set, otherwise we'd use mock.sentinel
mock_ctx = mock.MagicMock()
mock_ctx.project_id = 'fake_project'
ig = objects.InstanceGroup.get_by_name(mock_ctx, 'f1')
mock_db_get.assert_called_once_with(mock_ctx, 'fake_project')
self.assertEqual('f1', ig.name)
self.assertRaises(exception.InstanceGroupNotFound,
objects.InstanceGroup.get_by_name,
mock_ctx, 'unknown')
@mock.patch('nova.objects.InstanceGroup.get_by_uuid')
@mock.patch('nova.objects.InstanceGroup.get_by_name')
def test_get_by_hint(self, mock_name, mock_uuid):
objects.InstanceGroup.get_by_hint(mock.sentinel.ctx, _DB_UUID)
mock_uuid.assert_called_once_with(mock.sentinel.ctx, _DB_UUID)
objects.InstanceGroup.get_by_hint(mock.sentinel.ctx, 'name')
mock_name.assert_called_once_with(mock.sentinel.ctx, 'name')
class TestInstanceGroupListObject(test_objects._LocalTest,
_TestInstanceGroupListObject):
pass
class TestRemoteInstanceGroupListObject(test_objects._RemoteTest,
_TestInstanceGroupListObject):
pass
|
|
#!/usr/bin/env python3
# Copyright (c) 2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.messages import msg_qgetdata, msg_qwatch
from test_framework.mininode import (
mininode_lock,
P2PInterface,
)
from test_framework.test_framework import DashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
force_finish_mnsync,
wait_until,
)
'''
p2p_quorum_data.py
Tests QGETDATA/QDATA functionality
'''
# Possible error values of QDATA
QUORUM_TYPE_INVALID = 1
QUORUM_BLOCK_NOT_FOUND = 2
QUORUM_NOT_FOUND = 3
MASTERNODE_IS_NO_MEMBER = 4
QUORUM_VERIFICATION_VECTOR_MISSING = 5
ENCRYPTED_CONTRIBUTIONS_MISSING = 6
# Used to overwrite MNAUTH for mininode connections
fake_mnauth_1 = ["cecf37bf0ec05d2d22cb8227f88074bb882b94cd2081ba318a5a444b1b15b9fd",
"087ba00bf61135f3860c4944a0debabe186ef82628fbe4ceaed1ad51d672c58dde14ea4b321efe0b89257a40322bc972"]
fake_mnauth_2 = ["6ad7ed7a2d6c2c1db30fc364114602b36b2730a9aa96d8f11f1871a9cee37378",
"122463411a86362966a5161805f24cf6a0eef08a586b8e00c4f0ad0b084c5bb3f5c9a60ee5ffc78db2313897e3ab2223"]
# Used to distinguish mininode connections
uacomment_m3_1 = "MN3_1"
uacomment_m3_2 = "MN3_2"
def assert_qdata(qdata, qgetdata, error, len_vvec=0, len_contributions=0):
assert qdata is not None and qgetdata is not None
assert_equal(qdata.quorum_type, qgetdata.quorum_type)
assert_equal(qdata.quorum_hash, qgetdata.quorum_hash)
assert_equal(qdata.data_mask, qgetdata.data_mask)
assert_equal(qdata.protx_hash, qgetdata.protx_hash)
assert_equal(qdata.error, error)
assert_equal(len(qdata.quorum_vvec), len_vvec)
assert_equal(len(qdata.enc_contributions), len_contributions)
def wait_for_banscore(node, peer_id, expected_score):
def get_score():
for peer in node.getpeerinfo():
if peer["id"] == peer_id:
return peer["banscore"]
return None
wait_until(lambda: get_score() == expected_score, timeout=6)
def p2p_connection(node, uacomment=None):
return node.add_p2p_connection(QuorumDataInterface(), uacomment=uacomment)
def get_mininode_id(node, uacomment=None):
def get_id():
for p in node.getpeerinfo():
for p2p in node.p2ps:
if uacomment is not None and p2p.uacomment != uacomment:
continue
if p["subver"] == p2p.strSubVer.decode():
return p["id"]
return None
wait_until(lambda: get_id() is not None, timeout=10)
return get_id()
def mnauth(node, node_id, protx_hash, operator_pubkey):
assert node.mnauth(node_id, protx_hash, operator_pubkey)
mnauth_peer_id = None
for peer in node.getpeerinfo():
if "verified_proregtx_hash" in peer and peer["verified_proregtx_hash"] == protx_hash:
assert_equal(mnauth_peer_id, None)
mnauth_peer_id = peer["id"]
assert_equal(mnauth_peer_id, node_id)
class QuorumDataInterface(P2PInterface):
def __init__(self):
super().__init__()
def test_qgetdata(self, qgetdata, expected_error=0, len_vvec=0, len_contributions=0, response_expected=True):
self.send_message(qgetdata)
self.wait_for_qdata(message_expected=response_expected)
if response_expected:
assert_qdata(self.get_qdata(), qgetdata, expected_error, len_vvec, len_contributions)
def wait_for_qgetdata(self, timeout=3, message_expected=True):
def test_function():
return self.message_count["qgetdata"]
wait_until(test_function, timeout=timeout, lock=mininode_lock, do_assert=message_expected)
self.message_count["qgetdata"] = 0
if not message_expected:
assert not self.message_count["qgetdata"]
def get_qdata(self):
return self.last_message["qdata"]
def wait_for_qdata(self, timeout=10, message_expected=True):
def test_function():
return self.message_count["qdata"]
wait_until(test_function, timeout=timeout, lock=mininode_lock, do_assert=message_expected)
self.message_count["qdata"] = 0
if not message_expected:
assert not self.message_count["qdata"]
class QuorumDataMessagesTest(DashTestFramework):
def set_test_params(self):
extra_args = [["-llmq-data-recovery=0"]] * 4
self.set_dash_test_params(4, 3, fast_dip3_enforcement=True, extra_args=extra_args)
def restart_mn(self, mn, reindex=False):
args = self.extra_args[mn.nodeIdx] + ['-masternodeblsprivkey=%s' % mn.keyOperator]
if reindex:
args.append('-reindex')
self.restart_node(mn.nodeIdx, args)
force_finish_mnsync(mn.node)
connect_nodes(mn.node, 0)
self.sync_blocks()
def run_test(self):
def force_request_expire(bump_seconds=self.quorum_data_request_expiration_timeout + 1):
self.bump_mocktime(bump_seconds)
# Test with/without expired request cleanup
if node0.getblockcount() % 2:
node0.generate(1)
self.sync_blocks()
def test_basics():
self.log.info("Testing basics of QGETDATA/QDATA")
p2p_node0 = p2p_connection(node0)
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_node0 = get_mininode_id(node0)
id_p2p_mn1 = get_mininode_id(mn1.node)
# Ensure that both nodes start with zero ban score
wait_for_banscore(node0, id_p2p_node0, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
self.log.info("Check that normal node doesn't respond to qgetdata "
"and does bump our score")
p2p_node0.test_qgetdata(qgetdata_all, response_expected=False)
wait_for_banscore(node0, id_p2p_node0, 10)
# The masternode should not respond to qgetdata for non-masternode connections
self.log.info("Check that masternode doesn't respond to "
"non-masternode connection. Doesn't bump score.")
p2p_mn1.test_qgetdata(qgetdata_all, response_expected=False)
wait_for_banscore(mn1.node, id_p2p_mn1, 10)
# Open a fake MNAUTH authenticated P2P connection to the masternode to allow qgetdata
node0.disconnect_p2ps()
mn1.node.disconnect_p2ps()
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
# The masternode should now respond to qgetdata requests
self.log.info("Request verification vector")
p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
# Note: our banscore is bumped as we are requesting too rapidly,
# however the node still returns the data
self.log.info("Request encrypted contributions")
p2p_mn1.test_qgetdata(qgetdata_contributions, 0, 0, self.llmq_size)
wait_for_banscore(mn1.node, id_p2p_mn1, 25)
# Request both
# Note: our banscore is bumped as we are requesting too rapidly,
# however the node still returns the data
self.log.info("Request both")
p2p_mn1.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
wait_for_banscore(mn1.node, id_p2p_mn1, 50)
mn1.node.disconnect_p2ps()
self.log.info("Test ban score increase for invalid / unexpected QDATA")
p2p_mn1 = p2p_connection(mn1.node)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
qdata_valid = p2p_mn2.get_qdata()
# - Not requested
p2p_mn1.send_message(qdata_valid)
time.sleep(1)
wait_for_banscore(mn1.node, id_p2p_mn1, 10)
# - Already received
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
p2p_mn1.send_message(qdata_valid)
time.sleep(1)
p2p_mn1.send_message(qdata_valid)
wait_for_banscore(mn1.node, id_p2p_mn1, 20)
# - Not like requested
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
qdata_invalid_request = qdata_valid
qdata_invalid_request.data_mask = 2
p2p_mn1.send_message(qdata_invalid_request)
wait_for_banscore(mn1.node, id_p2p_mn1, 30)
# - Invalid verification vector
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
qdata_invalid_vvec = qdata_valid
qdata_invalid_vvec.quorum_vvec.pop()
p2p_mn1.send_message(qdata_invalid_vvec)
wait_for_banscore(mn1.node, id_p2p_mn1, 40)
# - Invalid contributions
force_request_expire()
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_mn1.wait_for_qgetdata()
qdata_invalid_contribution = qdata_valid
qdata_invalid_contribution.enc_contributions.pop()
p2p_mn1.send_message(qdata_invalid_contribution)
wait_for_banscore(mn1.node, id_p2p_mn1, 50)
mn1.node.disconnect_p2ps()
mn2.node.disconnect_p2ps()
self.log.info("Test all available error codes")
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
qgetdata_invalid_type = msg_qgetdata(quorum_hash_int, 103, 0x01, protx_hash_int)
qgetdata_invalid_block = msg_qgetdata(protx_hash_int, 100, 0x01, protx_hash_int)
qgetdata_invalid_quorum = msg_qgetdata(int(mn1.node.getblockhash(0), 16), 100, 0x01, protx_hash_int)
qgetdata_invalid_no_member = msg_qgetdata(quorum_hash_int, 100, 0x02, quorum_hash_int)
p2p_mn1.test_qgetdata(qgetdata_invalid_type, QUORUM_TYPE_INVALID)
p2p_mn1.test_qgetdata(qgetdata_invalid_block, QUORUM_BLOCK_NOT_FOUND)
p2p_mn1.test_qgetdata(qgetdata_invalid_quorum, QUORUM_NOT_FOUND)
p2p_mn1.test_qgetdata(qgetdata_invalid_no_member, MASTERNODE_IS_NO_MEMBER)
            # The last two error cases require the node to miss its DKG data, so we just reindex the node.
mn1.node.disconnect_p2ps()
self.restart_mn(mn1, reindex=True)
# Re-connect to the masternode
p2p_mn1 = p2p_connection(mn1.node)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
id_p2p_mn2 = get_mininode_id(mn2.node)
assert id_p2p_mn1 is not None
assert id_p2p_mn2 is not None
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
# Validate the DKG data is missing
p2p_mn1.test_qgetdata(qgetdata_vvec, QUORUM_VERIFICATION_VECTOR_MISSING)
p2p_mn1.test_qgetdata(qgetdata_contributions, ENCRYPTED_CONTRIBUTIONS_MISSING)
self.log.info("Test DKG data recovery with QDATA")
# Now that mn1 is missing its DKG data try to recover it by querying the data from mn2 and then sending it
# to mn1 with a direct QDATA message.
#
# mininode - QGETDATA -> mn2 - QDATA -> mininode - QDATA -> mn1
#
            # However, mn1 only accepts self-requested QDATA messages, which is why we trigger mn1 - QGETDATA -> mininode
# via the RPC command "quorum getdata".
#
# Get the required DKG data for mn1
p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
# Trigger mn1 - QGETDATA -> p2p_mn1
assert mn1.node.quorum("getdata", id_p2p_mn1, 100, quorum_hash, 0x03, mn1.proTxHash)
# Wait until mn1 sent the QGETDATA to p2p_mn1
p2p_mn1.wait_for_qgetdata()
# Send the QDATA received from mn2 to mn1
p2p_mn1.send_message(p2p_mn2.get_qdata())
# Now mn1 should have its data back!
self.wait_for_quorum_data([mn1], 100, quorum_hash, recover=False)
# Restart one more time and make sure data gets saved to db
mn1.node.disconnect_p2ps()
mn2.node.disconnect_p2ps()
self.restart_mn(mn1)
self.wait_for_quorum_data([mn1], 100, quorum_hash, recover=False)
# Test request limiting / banscore increase
def test_request_limit():
def test_send_from_two_to_one(send_1, expected_score_1, send_2, expected_score_2, clear_requests=False):
if clear_requests:
force_request_expire()
if send_1:
p2p_mn3_1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
if send_2:
p2p_mn3_2.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn3.node, id_p2p_mn3_1, expected_score_1)
wait_for_banscore(mn3.node, id_p2p_mn3_2, expected_score_2)
self.log.info("Test request limiting / banscore increases")
p2p_mn1 = p2p_connection(mn1.node)
id_p2p_mn1 = get_mininode_id(mn1.node)
mnauth(mn1.node, id_p2p_mn1, fake_mnauth_1[0], fake_mnauth_1[1])
p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 0)
force_request_expire(299) # This shouldn't clear requests, next request should bump score
p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 25)
force_request_expire(1) # This should clear the requests now, next request should not bump score
p2p_mn1.test_qgetdata(qgetdata_vvec, 0, self.llmq_threshold, 0)
wait_for_banscore(mn1.node, id_p2p_mn1, 25)
mn1.node.disconnect_p2ps()
            # Requesting one QDATA with mn1 and mn2 from mn3 should not result
            # in a banscore increase for either of them.
p2p_mn3_1 = p2p_connection(mn3.node, uacomment_m3_1)
p2p_mn3_2 = p2p_connection(mn3.node, uacomment_m3_2)
id_p2p_mn3_1 = get_mininode_id(mn3.node, uacomment_m3_1)
id_p2p_mn3_2 = get_mininode_id(mn3.node, uacomment_m3_2)
assert id_p2p_mn3_1 != id_p2p_mn3_2
mnauth(mn3.node, id_p2p_mn3_1, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn3.node, id_p2p_mn3_2, fake_mnauth_2[0], fake_mnauth_2[1])
            # Now try some {mn1, mn2} - QGETDATA -> mn3 combinations to make
            # sure the request limit is tracked per connection
test_send_from_two_to_one(False, 0, True, 0, True)
test_send_from_two_to_one(True, 0, True, 25)
test_send_from_two_to_one(True, 25, False, 25)
test_send_from_two_to_one(False, 25, True, 25, True)
test_send_from_two_to_one(True, 25, True, 50)
test_send_from_two_to_one(True, 50, True, 75)
test_send_from_two_to_one(True, 50, True, 75, True)
test_send_from_two_to_one(True, 75, False, 75)
test_send_from_two_to_one(False, 75, True, None)
# mn1 should still have a score of 75
wait_for_banscore(mn3.node, id_p2p_mn3_1, 75)
# mn2 should be "banned" now
wait_until(lambda: not p2p_mn3_2.is_connected, timeout=10)
mn3.node.disconnect_p2ps()
# Test that QWATCH connections are also allowed to query data but all
# QWATCH connections share one request limit slot
def test_qwatch_connections():
self.log.info("Test QWATCH connections")
force_request_expire()
p2p_mn3_1 = p2p_connection(mn3.node, uacomment_m3_1)
p2p_mn3_2 = p2p_connection(mn3.node, uacomment_m3_2)
id_p2p_mn3_1 = get_mininode_id(mn3.node, uacomment_m3_1)
id_p2p_mn3_2 = get_mininode_id(mn3.node, uacomment_m3_2)
assert id_p2p_mn3_1 != id_p2p_mn3_2
wait_for_banscore(mn3.node, id_p2p_mn3_1, 0)
wait_for_banscore(mn3.node, id_p2p_mn3_2, 0)
# Send QWATCH for both connections
p2p_mn3_1.send_message(msg_qwatch())
p2p_mn3_2.send_message(msg_qwatch())
# Now send alternating and make sure they share the same request limit
p2p_mn3_1.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
wait_for_banscore(mn3.node, id_p2p_mn3_1, 0)
p2p_mn3_2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
wait_for_banscore(mn3.node, id_p2p_mn3_2, 25)
p2p_mn3_1.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
wait_for_banscore(mn3.node, id_p2p_mn3_1, 25)
mn3.node.disconnect_p2ps()
def test_watchquorums():
self.log.info("Test -watchquorums support")
for extra_args in [[], ["-watchquorums"]]:
self.restart_node(0, self.extra_args[0] + extra_args)
for i in range(self.num_nodes - 1):
connect_nodes(node0, i + 1)
p2p_node0 = p2p_connection(node0)
p2p_mn2 = p2p_connection(mn2.node)
id_p2p_node0 = get_mininode_id(node0)
id_p2p_mn2 = get_mininode_id(mn2.node)
mnauth(node0, id_p2p_node0, fake_mnauth_1[0], fake_mnauth_1[1])
mnauth(mn2.node, id_p2p_mn2, fake_mnauth_2[0], fake_mnauth_2[1])
p2p_mn2.test_qgetdata(qgetdata_all, 0, self.llmq_threshold, self.llmq_size)
assert node0.quorum("getdata", id_p2p_node0, 100, quorum_hash, 0x03, mn1.proTxHash)
p2p_node0.wait_for_qgetdata()
p2p_node0.send_message(p2p_mn2.get_qdata())
wait_for_banscore(node0, id_p2p_node0, (1 - len(extra_args)) * 10)
node0.disconnect_p2ps()
mn2.node.disconnect_p2ps()
def test_rpc_quorum_getdata_protx_hash():
self.log.info("Test optional proTxHash of `quorum getdata`")
assert_raises_rpc_error(-8, "proTxHash missing",
mn1.node.quorum, "getdata", 0, 100, quorum_hash, 0x02)
assert_raises_rpc_error(-8, "proTxHash invalid",
mn1.node.quorum, "getdata", 0, 100, quorum_hash, 0x03,
"0000000000000000000000000000000000000000000000000000000000000000")
# Enable DKG and disable ChainLocks
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4070908800)
self.wait_for_sporks_same()
quorum_hash = self.mine_quorum()
node0 = self.nodes[0]
mn1 = self.mninfo[0]
mn2 = self.mninfo[1]
mn3 = self.mninfo[2]
# Convert the hex values into integer values
quorum_hash_int = int(quorum_hash, 16)
protx_hash_int = int(mn1.proTxHash, 16)
# Valid requests
qgetdata_vvec = msg_qgetdata(quorum_hash_int, 100, 0x01, protx_hash_int)
qgetdata_contributions = msg_qgetdata(quorum_hash_int, 100, 0x02, protx_hash_int)
qgetdata_all = msg_qgetdata(quorum_hash_int, 100, 0x03, protx_hash_int)
test_basics()
test_request_limit()
test_qwatch_connections()
test_watchquorums()
test_rpc_quorum_getdata_protx_hash()
if __name__ == '__main__':
QuorumDataMessagesTest().main()
|
|
"""
Tests for stuff in django.utils.datastructures.
"""
import copy
import pickle
import unittest
from django.utils.datastructures import *
class DatastructuresTestCase(unittest.TestCase):
def assertRaisesErrorWithMessage(self, error, message, callable,
*args, **kwargs):
self.assertRaises(error, callable, *args, **kwargs)
try:
callable(*args, **kwargs)
except error, e:
self.assertEqual(message, str(e))
class SortedDictTests(DatastructuresTestCase):
def setUp(self):
self.d1 = SortedDict()
self.d1[7] = 'seven'
self.d1[1] = 'one'
self.d1[9] = 'nine'
self.d2 = SortedDict()
self.d2[1] = 'one'
self.d2[9] = 'nine'
self.d2[0] = 'nil'
self.d2[7] = 'seven'
def test_basic_methods(self):
self.assertEqual(self.d1.keys(), [7, 1, 9])
self.assertEqual(self.d1.values(), ['seven', 'one', 'nine'])
self.assertEqual(self.d1.items(), [(7, 'seven'), (1, 'one'), (9, 'nine')])
def test_overwrite_ordering(self):
""" Overwriting an item keeps it's place. """
self.d1[1] = 'ONE'
self.assertEqual(self.d1.values(), ['seven', 'ONE', 'nine'])
def test_append_items(self):
""" New items go to the end. """
self.d1[0] = 'nil'
self.assertEqual(self.d1.keys(), [7, 1, 9, 0])
def test_delete_and_insert(self):
"""
Deleting an item, then inserting the same key again will place it
at the end.
"""
del self.d2[7]
self.assertEqual(self.d2.keys(), [1, 9, 0])
self.d2[7] = 'lucky number 7'
self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
def test_change_keys(self):
"""
Changing the keys won't do anything, it's only a copy of the
keys dict.
"""
k = self.d2.keys()
k.remove(9)
self.assertEqual(self.d2.keys(), [1, 9, 0, 7])
def test_init_keys(self):
"""
Initialising a SortedDict with two keys will just take the first one.
A real dict will actually take the second value so we will too, but
we'll keep the ordering from the first key found.
"""
tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
d = SortedDict(tuples)
self.assertEqual(d.keys(), [2, 1])
real_dict = dict(tuples)
self.assertEqual(sorted(real_dict.values()), ['one', 'second-two'])
# Here the order of SortedDict values *is* what we are testing
self.assertEqual(d.values(), ['second-two', 'one'])
def test_overwrite(self):
self.d1[1] = 'not one'
self.assertEqual(self.d1[1], 'not one')
self.assertEqual(self.d1.keys(), self.d1.copy().keys())
def test_append(self):
self.d1[13] = 'thirteen'
self.assertEqual(
repr(self.d1),
"{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
)
def test_pop(self):
self.assertEqual(self.d1.pop(1, 'missing'), 'one')
self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
# We don't know which item will be popped in popitem(), so we'll
# just check that the number of keys has decreased.
l = len(self.d1)
self.d1.popitem()
self.assertEqual(l - len(self.d1), 1)
def test_dict_equality(self):
d = SortedDict((i, i) for i in xrange(3))
self.assertEqual(d, {0: 0, 1: 1, 2: 2})
def test_tuple_init(self):
d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")
def test_pickle(self):
self.assertEqual(
pickle.loads(pickle.dumps(self.d1, 2)),
{7: 'seven', 1: 'one', 9: 'nine'}
)
def test_clear(self):
self.d1.clear()
self.assertEqual(self.d1, {})
self.assertEqual(self.d1.keyOrder, [])
class MergeDictTests(DatastructuresTestCase):
def test_simple_mergedict(self):
d1 = {'chris':'cool', 'camri':'cute', 'cotton':'adorable',
'tulip':'snuggable', 'twoofme':'firstone'}
d2 = {'chris2':'cool2', 'camri2':'cute2', 'cotton2':'adorable2',
'tulip2':'snuggable2'}
d3 = {'chris3':'cool3', 'camri3':'cute3', 'cotton3':'adorable3',
'tulip3':'snuggable3'}
d4 = {'twoofme': 'secondone'}
md = MergeDict(d1, d2, d3)
self.assertEqual(md['chris'], 'cool')
self.assertEqual(md['camri'], 'cute')
self.assertEqual(md['twoofme'], 'firstone')
md2 = md.copy()
self.assertEqual(md2['chris'], 'cool')
def test_mergedict_merges_multivaluedict(self):
""" MergeDict can merge MultiValueDicts """
multi1 = MultiValueDict({'key1': ['value1'],
'key2': ['value2', 'value3']})
multi2 = MultiValueDict({'key2': ['value4'],
'key4': ['value5', 'value6']})
mm = MergeDict(multi1, multi2)
# Although 'key2' appears in both dictionaries,
# only the first value is used.
self.assertEqual(mm.getlist('key2'), ['value2', 'value3'])
self.assertEqual(mm.getlist('key4'), ['value5', 'value6'])
self.assertEqual(mm.getlist('undefined'), [])
self.assertEqual(sorted(mm.keys()), ['key1', 'key2', 'key4'])
self.assertEqual(len(mm.values()), 3)
self.assertTrue('value1' in mm.values())
self.assertEqual(sorted(mm.items(), key=lambda k: k[0]),
[('key1', 'value1'), ('key2', 'value3'),
('key4', 'value6')])
self.assertEqual([(k,mm.getlist(k)) for k in sorted(mm)],
[('key1', ['value1']),
('key2', ['value2', 'value3']),
('key4', ['value5', 'value6'])])
class MultiValueDictTests(DatastructuresTestCase):
def test_multivaluedict(self):
d = MultiValueDict({'name': ['Adrian', 'Simon'],
'position': ['Developer']})
self.assertEqual(d['name'], 'Simon')
self.assertEqual(d.get('name'), 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
self.assertEqual(list(d.iteritems()),
[('position', 'Developer'), ('name', 'Simon')])
self.assertEqual(list(d.iterlists()),
[('position', ['Developer']),
('name', ['Adrian', 'Simon'])])
# MultiValueDictKeyError: "Key 'lastname' not found in
# <MultiValueDict: {'position': ['Developer'],
# 'name': ['Adrian', 'Simon']}>"
self.assertRaisesErrorWithMessage(MultiValueDictKeyError,
'"Key \'lastname\' not found in <MultiValueDict: {\'position\':'\
' [\'Developer\'], \'name\': [\'Adrian\', \'Simon\']}>"',
d.__getitem__, 'lastname')
self.assertEqual(d.get('lastname'), None)
self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
self.assertEqual(d.getlist('lastname'), [])
d.setlist('lastname', ['Holovaty', 'Willison'])
self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
self.assertEqual(d.values(), ['Developer', 'Simon', 'Willison'])
self.assertEqual(list(d.itervalues()),
['Developer', 'Simon', 'Willison'])
def test_copy(self):
for copy_func in [copy.copy, lambda d: d.copy()]:
d1 = MultiValueDict({
"developers": ["Carl", "Fred"]
})
self.assertEqual(d1["developers"], "Fred")
d2 = copy_func(d1)
d2.update({"developers": "Groucho"})
self.assertEqual(d2["developers"], "Groucho")
self.assertEqual(d1["developers"], "Fred")
d1 = MultiValueDict({
"key": [[]]
})
self.assertEqual(d1["key"], [])
d2 = copy_func(d1)
d2["key"].append("Penguin")
self.assertEqual(d1["key"], ["Penguin"])
self.assertEqual(d2["key"], ["Penguin"])
class DotExpandedDictTests(DatastructuresTestCase):
def test_dotexpandeddict(self):
d = DotExpandedDict({'person.1.firstname': ['Simon'],
'person.1.lastname': ['Willison'],
'person.2.firstname': ['Adrian'],
'person.2.lastname': ['Holovaty']})
self.assertEqual(d['person']['1']['lastname'], ['Willison'])
self.assertEqual(d['person']['2']['lastname'], ['Holovaty'])
self.assertEqual(d['person']['2']['firstname'], ['Adrian'])
class ImmutableListTests(DatastructuresTestCase):
def test_sort(self):
d = ImmutableList(range(10))
# AttributeError: ImmutableList object is immutable.
self.assertRaisesErrorWithMessage(AttributeError,
'ImmutableList object is immutable.', d.sort)
self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
def test_custom_warning(self):
d = ImmutableList(range(10), warning="Object is immutable!")
self.assertEqual(d[1], 1)
# AttributeError: Object is immutable!
self.assertRaisesErrorWithMessage(AttributeError,
'Object is immutable!', d.__setitem__, 1, 'test')
class DictWrapperTests(DatastructuresTestCase):
def test_dictwrapper(self):
f = lambda x: "*%s" % x
d = DictWrapper({'a': 'a'}, f, 'xx_')
self.assertEqual("Normal: %(a)s. Modified: %(xx_a)s" % d,
'Normal: a. Modified: *a')
|
|
#!/usr/bin/python
"""
$Id: cwm.py,v 1.198 2012/01/30 09:30:20 timbl Exp $
Closed World Machine
(also, in Wales, a valley - topologically a partially closed world perhaps?)
This is an application which knows a certain amount of stuff and can manipulate
it. It uses llyn, a (forward chaining) query engine, not a (backward chaining)
inference engine: that is, it will apply all rules it can but won't figure out
which ones to apply to prove something.
License
-------
Cwm: http://www.w3.org/2000/10/swap/doc/cwm.html
Copyright (c) 2000-2004 World Wide Web Consortium, (Massachusetts
Institute of Technology, European Research Consortium for Informatics
and Mathematics, Keio University). All Rights Reserved. This work is
distributed under the W3C Software License [1] in the hope that it
will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
[1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
"""
#the following lines should be removed. They will NOT work with any distribution
#-----------------
from os import chdir, getcwd
from sys import path
qqq = getcwd()
chdir(path[0])
chdir('..')
path.append(getcwd())
chdir(qqq)
#import swap
#print dir(swap)
#-----------------
#end lines should be removed
import string, sys
# From http://www.w3.org/2000/10/swap/
from swap import diag
from swap.why import explainFormula, newTopLevelFormula
from swap.diag import verbosity, setVerbosity, progress, tracking, setTracking
from swap.uripath import join, splitFrag
from swap.webAccess import urlopenForRDF, load, sandBoxed
from swap import notation3 # N3 parsers and generators
from swap import toXML # RDF generator
from swap.why import BecauseOfCommandLine
from swap.query import think, applyRules, applyQueries, applySparqlQueries, testIncludes
from swap.update import patch
from swap import uripath
from swap import llyn
from swap import RDFSink
cvsRevision = "$Revision: 1.198 $"
################################################# Command line
def doCommand():
"""Command line RDF/N3 tool
<command> <options> <steps> [--with <more args> ]
options:
--pipe Don't store, just pipe out *
steps, in order left to right:
--rdf Input & Output ** in RDF/XML instead of n3 from now on
--n3 Input & Output in N3 from now on. (Default)
--rdf=flags Input & Output ** in RDF and set given RDF flags
--n3=flags Input & Output in N3 and set N3 flags
--ntriples Input & Output in NTriples (equiv --n3=usbpartane -bySubject -quiet)
--language=x Input & Output in "x" (rdf, n3, etc) --rdf same as: --language=rdf
--languageOptions=y --n3=sp same as: --language=n3 --languageOptions=sp
--ugly Store input and regurgitate, data only, fastest *
--bySubject Store input and regurgitate in subject order *
--no No output *
(default is to store and pretty print with anonymous nodes) *
--base=<uri> Set the base URI. Input or output is done as though this were the document URI.
--closure=flags Control automatic lookup of identifiers (see below)
<uri> Load document. URI may be relative to current directory.
--apply=foo Read rules from foo, apply to store, adding conclusions to store
--patch=foo Read patches from foo, applying insertions and deletions to store
--filter=foo Read rules from foo, apply to store, REPLACING store with conclusions
--query=foo Read a N3QL query from foo, apply it to the store, and replace the store with its conclusions
--sparql=foo Read a SPARQL query from foo, apply it to the store, and replace the store with its conclusions
--rules Apply rules in store to store, adding conclusions to store
--think as -rules but continue until no more rule matches (or forever!)
--engine=otter use otter (in your $PATH) instead of llyn for linking, etc
--why Replace the store with an explanation of its contents
--why=u proof tries to be shorter
--mode=flags Set modus operandi for inference (see below)
--reify Replace the statements in the store with statements describing them.
--dereify Undo the effects of --reify
--flatten Reify only nested subexpressions (not top level) so that no {} remain.
--unflatten Undo the effects of --flatten
--think=foo as -apply=foo but continue until no more rule matches (or forever!)
--purge Remove from store any triple involving anything in class log:Chaff
--data Remove all except plain RDF triples (formulae, forAll, etc)
--strings Dump :s to stdout ordered by :k wherever { :k log:outputString :s }
--crypto Enable processing of crypto builtin functions. Requires python crypto.
--help print this message
--revision print CVS revision numbers of major modules
--chatty=50 Verbose debugging output of questionable use, range 0-99
--sparqlServer instead of outputting, start a SPARQL server on port 8000 of the store
--sparqlResults After sparql query, print in sparqlResults format instead of rdf
finally:
--with Pass any further arguments to the N3 store as os:argv values
* mutually exclusive
** doesn't work for complex cases :-/
Examples:
cwm --rdf foo.rdf --n3 --pipe Convert from rdf/xml to rdf/n3
cwm foo.n3 bar.n3 --think Combine data and find all deductions
cwm foo.n3 --flat --n3=spart
Mode flags affect inference extending to the web:
r Needed to enable any remote stuff.
a When reading schema, also load rules pointed to by schema (requires r, s)
E Errors loading schemas of definitive documents are ignored
m Schemas and definitive documents loaded are merged into the meta knowledge
(otherwise they are consulted independently)
s Read the schema for any predicate in a query.
u Generate unique ids using a run-specific
Closure flags are set to cause the working formula to be automatically expanded to
the closure under the operation of looking up:
s the subject of a statement added
p the predicate of a statement added
o the object of a statement added
t the object of an rdf:type statement added
i any owl:imports documents
r any doc:rules documents
E errors are ignored --- This is independent of --mode=E
n Normalize IRIs to URIs
e Smush together any nodes which are = (owl:sameAs)
See http://www.w3.org/2000/10/swap/doc/cwm for more documentation.
Setting the environment variable CWM_RDFLIB to 1 makes Cwm use rdflib to parse
rdf/xml files. Note that this requires rdflib.
"""
import time
import sys
from swap import myStore
# These would just be attributes if this were an object
global _store
global workingContext
option_need_rdf_sometime = 0 # If we don't need it, don't import it
# (to save errors where parsers don't exist)
    option_pipe = 0 # Don't store, just pipe through
option_inputs = []
option_reify = 0 # Flag: reify on output (process?)
option_flat = 0 # Flag: reify on output (process?)
option_crypto = 0 # Flag: make cryptographic algorithms available
setTracking(0)
option_outURI = None
option_outputStyle = "-best"
_gotInput = 0 # Do we not need to take input from stdin?
option_meta = 0
option_normalize_iri = 0
option_flags = { "rdf":"l", "n3":"", "think":"", "sparql":""}
# RDF/XML serializer can't do list ("collection") syntax.
option_quiet = 0
option_with = None # Command line arguments made available to N3 processing
option_engine = "llyn"
option_why = ""
_step = 0 # Step number used for metadata
_genid = 0
hostname = "localhost" # @@@@@@@@@@@ Get real one
# The base URI for this process - the Web equiv of cwd
_baseURI = uripath.base()
option_format = "n3" # set the default format
option_first_format = None
_outURI = _baseURI
option_baseURI = _baseURI # To start with - then tracks running base
# First pass on command line - - - - - - - P A S S 1
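# Pass 1 only gathers the settings that must be fixed before the store and
# output sink are built: input/output formats and their flags, the base URI,
# proof tracking (-why/-track), verbosity, and the list of input documents.
# -help and -revision return early, -with captures the remaining arguments;
# everything else is acted on in pass 2 below.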
for argnum in range(1,len(sys.argv)): # options after script name
arg = sys.argv[argnum]
if arg.startswith("--"): arg = arg[1:] # Chop posix-style -- to -
# _equals = string.find(arg, "=")
_lhs = ""
_rhs = ""
try:
[_lhs,_rhs]=arg.split('=',1)
try:
_uri = join(option_baseURI, _rhs)
except ValueError:
_uri = _rhs
except ValueError: pass
if arg == "-ugly": option_outputStyle = arg
elif _lhs == "-base": option_baseURI = _uri
elif arg == "-rdf":
option_format = "rdf"
if option_first_format == None:
option_first_format = option_format
option_need_rdf_sometime = 1
elif _lhs == "-rdf":
option_format = "rdf"
if option_first_format == None:
option_first_format = option_format
option_flags["rdf"] = _rhs
option_need_rdf_sometime = 1
elif arg == "-n3":
option_format = "n3"
if option_first_format == None:
option_first_format = option_format
elif _lhs == "-n3":
option_format = "n3"
if option_first_format == None:
option_first_format = option_format
option_flags["n3"] = _rhs
elif _lhs == "-mode":
option_flags["think"] = _rhs
elif _lhs == "-closure":
if "n" in _rhs:
option_normalize_iri = 1
#elif _lhs == "-solve":
# sys.argv[argnum+1:argnum+1] = ['-think', '-filter=' + _rhs]
elif _lhs == "-language":
option_format = _rhs
if option_first_format == None:
option_first_format = option_format
elif _lhs == "-languageOptions":
option_flags[option_format] = _rhs
elif arg == "-quiet": option_quiet = 1
elif arg == "-pipe": option_pipe = 1
elif arg == "-crypto": option_crypto = 1
elif _lhs == "-why":
diag.tracking=1
diag.setTracking(1)
option_why = _rhs
elif arg == "-why":
diag.tracking=1
diag.setTracking(1)
option_why = ""
elif arg == "-track":
diag.tracking=1
diag.setTracking(1)
elif arg == "-bySubject": option_outputStyle = arg
elif arg == "-no": option_outputStyle = "-no"
elif arg == "-debugString": option_outputStyle = "-debugString"
elif arg == "-strings": option_outputStyle = "-no"
elif arg == "-sparqlResults": option_outputStyle = "-no"
elif arg == "-triples" or arg == "-ntriples":
option_format = "n3"
option_flags["n3"] = "bravestpun"
option_outputStyle = "-bySubject"
option_quiet = 1
elif _lhs == "-outURI": option_outURI = _uri
elif _lhs == "-chatty":
setVerbosity(int(_rhs))
elif arg[:7] == "-apply=": pass
elif arg[:7] == "-patch=": pass
elif arg == "-reify": option_reify = 1
elif arg == "-flat": option_flat = 1
elif arg == "-help":
print doCommand.__doc__
print notation3.ToN3.flagDocumentation
print toXML.ToRDF.flagDocumentation
try:
from swap import sax2rdf # RDF1.0 syntax parser to N3 RDF stream
print sax2rdf.RDFXMLParser.flagDocumentation
except:
pass
return
elif arg == "-revision":
progress( "cwm=",cvsRevision, "llyn=", llyn.cvsRevision)
return
elif arg == "-with":
option_with = sys.argv[argnum+1:] # The rest of the args are passed to n3
break
elif arg[0] == "-": pass # Other option
else :
option_inputs.append(join(option_baseURI, arg))
_gotInput = _gotInput + 1 # input filename
# Between passes, prepare for processing
setVerbosity(0)
if not option_normalize_iri:
llyn.canonical = lambda x: x
# Base defaults
if option_baseURI == _baseURI: # Base not specified explicitly - special case
if _outURI == _baseURI: # Output name not specified either
if _gotInput == 1: # But input file *is*,
_outURI = option_inputs[0] # Just output to same URI
option_baseURI = _outURI # using that as base.
if diag.tracking:
_outURI = RDFSink.runNamespace()[:-1]
option_baseURI = _outURI
option_baseURI = splitFrag(option_baseURI)[0]
# Fix the output sink
if option_format == "rdf":
_outSink = toXML.ToRDF(sys.stdout, _outURI, base=option_baseURI, flags=option_flags["rdf"])
elif option_format == "n3" or option_format == "sparql":
_outSink = notation3.ToN3(sys.stdout.write, base=option_baseURI,
quiet=option_quiet, flags=option_flags["n3"])
elif option_format == "trace":
_outSink = RDFSink.TracingRDFSink(_outURI, base=option_baseURI,
flags=option_flags.get("trace",""))
if option_pipe:
# this is really what a parser wants to dump to
_outSink.backing = llyn.RDFStore( _outURI+"#_g",
argv=option_with, crypto=option_crypto)
else:
# this is really what a store wants to dump to
_outSink.backing = notation3.ToN3(sys.stdout.write,
base=option_baseURI, quiet=option_quiet,
flags=option_flags["n3"])
# hm. why does TimBL use sys.stdout.write, above? Performance?
else:
raise NotImplementedError
version = "$Id: cwm.py,v 1.198 2012/01/30 09:30:20 timbl Exp $"
if not option_quiet and option_outputStyle != "-no":
_outSink.makeComment("Processed by " + version[1:-1]) # Strip $ to disarm
_outSink.makeComment(" using base " + option_baseURI)
if option_flat:
_outSink = notation3.Reifier(_outSink, _outURI+ "#_formula", flat=1)
if diag.tracking:
myReason = BecauseOfCommandLine(`sys.argv`)
# @@ add user, host, pid, pwd, date time? Privacy!
else:
myReason = None
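# With -pipe the output sink itself acts as the store, so parsed statements
# stream straight through; otherwise a real RDFStore is created and a working
# formula is opened to collect everything that the inputs provide.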
if option_pipe:
_store = _outSink
workingContext = _outSink #.newFormula()
else:
if "u" in option_flags["think"]:
_store = llyn.RDFStore(argv=option_with, crypto=option_crypto)
else:
_store = llyn.RDFStore( _outURI+"#_g",
argv=option_with, crypto=option_crypto)
myStore.setStore(_store)
if _gotInput:
workingContext = _store.newFormula(option_inputs [0]+"#_work")
newTopLevelFormula(workingContext)
else: # default input
if option_first_format is None: option_first_format = option_format
ContentType={ "rdf": "application/xml+rdf", "n3":
"text/n3", "sparql":
"x-application/sparql"}[option_first_format]
workingContext = _store.load(
# asIfFrom = join(_baseURI, ".stdin"),
asIfFrom = _baseURI,
contentType = ContentType,
flags = option_flags[option_first_format],
remember = 0,
referer = "",
why = myReason, topLevel=True)
workingContext.reopen()
workingContext.stayOpen = 1 # Never canonicalize this. Never share it.
# ____________________________________________________________________
# Take commands from command line:- - - - - P A S S 2
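# Pass 2 walks the same argv again, in order, and carries out each option
# against the working formula: loading documents, applying rules, filtering,
# querying, dumping output, and so on. Format and flag settings are reset
# first so this pass starts from the same defaults as pass 1 and interprets
# options positionally.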
option_format = "n3" # Use RDF/n3 rather than RDF/XML
option_flags = { "rdf":"l", "n3":"", "think": "", "sparql":"" }
option_quiet = 0
_outURI = _baseURI
option_baseURI = _baseURI # To start with
def filterize():
"""implementation of --filter
for the --filter command, so we don't have it printed twice
"""
global workingContext
global r
workingContext = workingContext.canonicalize()
_store._formulaeOfLength = {}
filterContext = _store.newFormula()
newTopLevelFormula(filterContext)
_store.load(_uri, openFormula=filterContext,
why=myReason, referer="")
_newContext = _store.newFormula()
newTopLevelFormula(_newContext)
applyRules(workingContext, filterContext, _newContext)
workingContext.close()
workingContext = _newContext
sparql_query_formula = None
for arg in sys.argv[1:]: # Command line options after script name
if verbosity()>5: progress("Processing %s." % (arg))
if arg.startswith("--"): arg = arg[1:] # Chop posix-style -- to -
_equals = string.find(arg, "=")
_lhs = ""
_rhs = ""
if _equals >=0:
_lhs = arg[:_equals]
_rhs = arg[_equals+1:]
try:
_uri = join(option_baseURI, _rhs)
except ValueError:
_uri =_rhs
if arg[0] != "-":
_inputURI = join(option_baseURI, splitFrag(arg)[0])
assert ':' in _inputURI
ContentType={ "rdf": "application/xml+rdf", "n3":
"text/n3",
"sparql": "x-application/sparql"}[option_format]
if not option_pipe: workingContext.reopen()
try:
load(_store, _inputURI,
openFormula=workingContext,
contentType =ContentType,
flags=option_flags[option_format],
referer="",
why=myReason)
except:
progress(_inputURI)
raise
_gotInput = 1
elif arg == "-help":
pass # shouldn't happen
elif arg == "-revision":
pass
elif _lhs == "-base":
option_baseURI = _uri
if verbosity() > 10: progress("Base now "+option_baseURI)
elif arg == "-ugly":
option_outputStyle = arg
elif arg == "-crypto": pass
elif arg == "-pipe": pass
elif _lhs == "-outURI": option_outURI = _uri
elif arg == "-rdf": option_format = "rdf"
elif _lhs == "-rdf":
option_format = "rdf"
option_flags["rdf"] = _rhs
elif _lhs == "-mode":
option_flags["think"] = _rhs
elif _lhs == "-closure":
workingContext.setClosureMode(_rhs)
elif arg == "-n3": option_format = "n3"
elif _lhs == "-n3":
option_format = "n3"
option_flags["n3"] = _rhs
elif _lhs == "-language":
option_format = _rhs
if option_first_format == None:
option_first_format = option_format
elif _lhs == "-languageOptions":
option_flags[option_format] = _rhs
elif arg == "-quiet" : option_quiet = 1
elif _lhs == "-chatty": setVerbosity(int(_rhs))
elif arg[:7] == "-track=":
diag.tracking = int(_rhs)
elif option_pipe: ############## End of pipable options
print "# Command line error: %s illegal option with -pipe", arg
break
elif arg == "-triples" or arg == "-ntriples":
option_format = "n3"
option_flags["n3"] = "spartan"
option_outputStyle = "-bySubject"
option_quiet = 1
elif arg == "-bySubject":
option_outputStyle = arg
elif arg == "-debugString":
option_outputStyle = arg
elif arg[:7] == "-apply=":
workingContext = workingContext.canonicalize()
filterContext = _store.load(_uri,
flags=option_flags[option_format],
referer="",
why=myReason, topLevel=True)
workingContext.reopen()
applyRules(workingContext, filterContext);
elif arg[:7] == "-apply=":
workingContext = workingContext.canonicalize()
filterContext = _store.load(_uri,
flags=option_flags[option_format],
referer="",
why=myReason, topLevel=True)
workingContext.reopen()
applyRules(workingContext, filterContext);
elif arg[:7] == "-patch=":
workingContext = workingContext.canonicalize()
filterContext = _store.load(_uri,
flags=option_flags[option_format],
referer="",
why=myReason, topLevel=True)
workingContext.reopen()
patch(workingContext, filterContext);
elif _lhs == "-filter":
filterize()
elif _lhs == "-query":
workingContext = workingContext.canonicalize()
filterContext = _store.load(_uri,
flags=option_flags[option_format],
referer="",
why=myReason, topLevel=True)
_newContext = _store.newFormula()
applyQueries(workingContext, filterContext, _newContext)
workingContext.close()
workingContext = _newContext
elif _lhs == "-sparql":
workingContext.stayOpen = False
workingContext = workingContext.canonicalize()
filterContext = _store.load(_uri, why=myReason,
referer="", contentType="x-application/sparql")
_newContext = _store.newFormula()
_newContext.stayOpen = True
sparql_query_formula = filterContext
applySparqlQueries(workingContext, filterContext, _newContext)
# workingContext.close()
workingContext = _newContext
elif _lhs == "-why" or arg == "-why":
workingContext.stayOpen = False
workingContext = workingContext.close()
workingContext = explainFormula(workingContext, option_why)
# Can't prove proofs
diag.tracking=0
diag.setTracking(0)
elif arg == "-dump":
workingContext = workingContext.canonicalize()
progress("\nDump of working formula:\n" + workingContext.debugString())
elif arg == "-purge":
workingContext.reopen()
_store.purge(workingContext)
elif arg == "-purge-rules" or arg == "-data":
workingContext.reopen()
_store.purgeExceptData(workingContext)
elif arg == "-rules":
workingContext.reopen()
applyRules(workingContext, workingContext)
elif arg[:7] == "-think=":
filterContext = _store.load(_uri, referer="", why=myReason, topLevel=True)
if verbosity() > 4:
progress( "Input rules to --think from " + _uri)
workingContext.reopen()
think(workingContext, filterContext, mode=option_flags["think"])
elif arg[:7] == "-solve=":
# --solve is a combination of --think and --filter.
think(workingContext, mode=option_flags["think"])
filterize()
elif _lhs == "-engine":
option_engine = _rhs
elif arg == "-think":
workingContext.isWorkingContext = True
think(workingContext, mode=option_flags["think"])
elif arg == '-rete':
from swap import pycwmko
pythink = pycwmko.directPychinkoQuery(workingContext)
#return
#pythink()
"""
from pychinko import interpreter
from swap.set_importer import Set, ImmutableSet
pyf = pycwmko.N3Loader.N3Loader()
conv = pycwmko.ToPyStore(pyf)
conv.statements(workingContext)
interp = interpreter.Interpreter(pyf.rules[:])
interp.addFacts(Set(pyf.facts), initialSet=True)
interp.run()
pyf.facts = interp.totalFacts
workingContext = workingContext.store.newFormula()
reconv = pycwmko.FromPyStore(workingContext, pyf)
reconv.run()
"""
elif arg == '-sparqlServer':
from swap.sparql import webserver
from swap import cwm_sparql
sandBoxed(True)
workingContext.stayOpen = False
workingContext = workingContext.canonicalize()
def _handler(s):
return cwm_sparql.sparql_queryString(workingContext, s)
webserver.sparql_handler = _handler
webserver.run()
elif arg == "-lxkbdump": # just for debugging
raise NotImplementedError
elif arg == "-lxfdump": # just for debugging
raise NotImplementedError
elif _lhs == "-prove":
# code copied from -filter without really being understood -sdh
_tmpstore = llyn.RDFStore( _outURI+"#_g", metaURI=_metaURI, argv=option_with, crypto=option_crypto)
tmpContext = _tmpstore.newFormula(_uri+ "#_formula")
_newURI = join(_baseURI, "_w_"+`_genid`) # Intermediate
_genid = _genid + 1
_newContext = _tmpstore.newFormula(_newURI+ "#_formula")
_tmpstore.loadURI(_uri)
print targetkb
elif arg == "-flatten":
#raise NotImplementedError
from swap import reify
workingContext = reify.flatten(workingContext)
elif arg == "-unflatten":
from swap import reify
workingContext = reify.unflatten(workingContext)
#raise NotImplementedError
elif arg == "-reify":
from swap import reify
workingContext = reify.reify(workingContext)
elif arg == "-dereify":
from swap import reify
workingContext = reify.dereify(workingContext)
elif arg == "-size":
progress("Size: %i statements in store, %i in working formula."
%(_store.size, workingContext.size()))
elif arg == "-strings": # suppress output
workingContext.outputStrings()
option_outputStyle = "-no"
elif arg == '-sparqlResults':
from cwm_sparql import outputString, SPARQL_NS
ns = _store.newSymbol(SPARQL_NS)
if not sparql_query_formula:
raise ValueError('No query')
else:
sys.stdout.write(outputString(sparql_query_formula, workingContext)[0].encode('utf_8'))
option_outputStyle = "-no"
elif arg == "-no": # suppress output
option_outputStyle = arg
elif arg[:8] == "-outURI=": pass
elif arg == "-with": break
else:
progress( "cwm: Unknown option: " + arg)
sys.exit(-1)
# Squirt it out if not piped
workingContext.stayOpen = 0 # End its use as an always-open knowledge base
if option_pipe:
workingContext.endDoc()
else:
if hasattr(_outSink, "serializeKB"):
raise NotImplementedError
else:
if verbosity()>5: progress("Begining output.")
workingContext = workingContext.close()
assert workingContext.canonical != None
if option_outputStyle == "-ugly":
_store.dumpChronological(workingContext, _outSink)
elif option_outputStyle == "-bySubject":
_store.dumpBySubject(workingContext, _outSink)
elif option_outputStyle == "-no":
pass
elif option_outputStyle == "-debugString":
print workingContext.debugString()
else: # "-best"
_store.dumpNested(workingContext, _outSink,
flags=option_flags[option_format])
############################################################ Main program
if __name__ == '__main__':
import os
doCommand()
|
|
from __future__ import with_statement
import os
import re
import pytest
import time
import six
from fanstatic import (Library,
Resource,
NeededResources,
Group,
init_needed,
del_needed,
get_needed,
clear_needed,
register_inclusion_renderer,
ConfigurationError,
bundle_resources,
LibraryDependencyCycleError,
NEEDED,
UnknownResourceExtensionError,
UnknownResourceError,
set_resource_file_existence_checking)
from fanstatic.core import inclusion_renderers
from fanstatic.core import thread_local_needed_data
from fanstatic.core import ModeResourceDependencyError
from fanstatic.codegen import sort_resources_topological
def test_resource():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
assert needed.resources() == [x2, x1, y1]
def test_resource_file_exists(tmpdir):
tmpdir.join('a.js').write('/* hello world */')
# by default this is set to False during the tests, but in normal
# non-test circumstances this is set to True, and we want to
# test things for real here
set_resource_file_existence_checking(True)
foo = Library('foo', tmpdir.strpath)
# since a.js exists, this should work
a = Resource(foo, 'a.js')
# now we try to create a resource that refers to a file
# that doesn't exist
with pytest.raises(UnknownResourceError):
b = Resource(foo, 'b.js')
sub_c = tmpdir.mkdir('sub').join('c.css').write('c')
c = Resource(foo, 'sub/c.css')
def test_resource_register_with_library():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js', minified='a.min.js')
assert len(foo.known_resources) == 2
assert x1 in foo.known_resources.values()
# Can not use the same relpath for two Resource declarations.
with pytest.raises(ConfigurationError):
x2 = Resource(foo, 'a.js')
def test_group_resource():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
group = Group([x1, x2])
needed = NeededResources()
needed.need(group)
assert group.resources == set([x1, x2])
more_stuff = Resource(foo, 'more_stuff.js', depends=[group])
assert more_stuff.resources == set([x1, x2, more_stuff])
def test_convenience_need_not_initialized():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
dummy = get_needed()
assert not isinstance(dummy, NeededResources)
# We return a new dummy instance for every get_needed:
dummy2 = get_needed()
assert dummy != dummy2
# A dummy never has resources:
assert not dummy.has_resources()
dummy.need(y1)
with pytest.raises(NotImplementedError):
dummy.render()
def test_convenience_clear_not_initialized():
# This test is put near the top of this module, or at least before
# the very first time ``init_needed()`` is called.
dummy = get_needed()
with pytest.raises(NotImplementedError):
dummy.clear()
with pytest.raises(NotImplementedError):
clear_needed()
# Initialize a needed resources object.
needed = init_needed()
assert get_needed() == needed
assert thread_local_needed_data.__dict__[NEEDED] == needed
# Clear it.
del_needed()
# It is gone, really.
with pytest.raises(KeyError):
thread_local_needed_data.__dict__[NEEDED]
# Clearing it again is OK.
del_needed()
# get_needed still works, dummy-style.
dummy2 = get_needed()
assert dummy2 != needed
with pytest.raises(NotImplementedError):
dummy.clear()
with pytest.raises(NotImplementedError):
clear_needed()
def test_convenience_need():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = init_needed()
assert get_needed() == needed
assert get_needed().resources() == []
y1.need()
assert get_needed().resources() == [x2, x1, y1]
def test_convenience_group_resource_need():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js')
group = Group([x1, x2, y1])
needed = init_needed()
assert get_needed() == needed
assert get_needed().resources() == []
group.need()
assert get_needed().resources() == [x2, x1, y1]
def test_depend_on_group():
foo = Library('foo', '')
a = Resource(foo, 'a.js')
b = Resource(foo, 'b.js')
g = Group([a, b])
c = Resource(foo, 'c.js', depends=[g])
g2 = Group([g])
g3 = Group([g, g2])
assert c.depends == set([a, b])
assert g2.depends == set([a, b])
assert g3.depends == set([a, b])
needed = NeededResources()
needed.need(c)
assert needed.resources() == [a, b, c]
def test_redundant_resource():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
needed.need(y1)
assert needed.resources() == [x2, x1, y1]
needed.need(x1)
assert needed.resources() == [x2, x1, y1]
needed.need(x2)
assert needed.resources() == [x2, x1, y1]
def test_redundant_resource_reorder():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(x1)
needed.need(x2)
needed.need(y1)
assert needed.resources() == [x2, x1, y1]
def test_redundant_more_complicated():
foo = Library('foo', '')
a1 = Resource(foo, 'a1.js')
a2 = Resource(foo, 'a2.js', depends=[a1])
a3 = Resource(foo, 'a3.js', depends=[a2])
a4 = Resource(foo, 'a4.js', depends=[a1])
needed = NeededResources()
needed.need(a3)
assert needed.resources() == [a1, a2, a3]
needed.need(a4)
# a4 is sorted before a3, because it is less deep
# in the dependency tree
assert needed.resources() == [a1, a2, a4, a3]
def test_redundant_more_complicated_reversed():
foo = Library('foo', '')
a1 = Resource(foo, 'a1.js')
a2 = Resource(foo, 'a2.js', depends=[a1])
a3 = Resource(foo, 'a3.js', depends=[a2])
a4 = Resource(foo, 'a4.js', depends=[a1])
needed = NeededResources()
needed.need(a4)
needed.need(a3)
# this will always be consistent, no matter
# in what order we need the resources
assert needed.resources() == [a1, a2, a4, a3]
def test_redundant_more_complicated_depends_on_all():
foo = Library('foo', '')
a1 = Resource(foo, 'a1.js')
a2 = Resource(foo, 'a2.js', depends=[a1])
a3 = Resource(foo, 'a3.js', depends=[a2])
a4 = Resource(foo, 'a4.js', depends=[a1])
a5 = Resource(foo, 'a5.js', depends=[a4, a3])
needed = NeededResources()
needed.need(a5)
assert needed.resources() == [a1, a2, a4, a3, a5]
def test_redundant_more_complicated_depends_on_all_reorder():
foo = Library('foo', '')
a1 = Resource(foo, 'a1.js')
a2 = Resource(foo, 'a2.js', depends=[a1])
a3 = Resource(foo, 'a3.js', depends=[a2])
a4 = Resource(foo, 'a4.js', depends=[a1])
a5 = Resource(foo, 'a5.js', depends=[a4, a3])
needed = NeededResources()
needed.need(a3)
needed.need(a5)
assert needed.resources() == [a1, a2, a4, a3, a5]
def test_mode_fully_specified():
foo = Library('foo', '')
k_debug = Resource(foo, 'k-debug.js')
k = Resource(foo, 'k.js', debug=k_debug)
needed = NeededResources()
needed.need(k)
assert needed.resources() == [k]
needed = NeededResources(debug=True)
needed.need(k)
assert needed.resources() == [k_debug]
# If no minified can be found, the 'raw' resource is taken.
needed = NeededResources(minified=True)
needed.need(k)
assert needed.resources() == [k]
with pytest.raises(ConfigurationError):
NeededResources(debug=True, minified=True)
# If only a minified resource is defined, debug returns the raw version.
x = Resource(foo, 'x.js', minified='x-min.js')
needed = NeededResources(debug=True)
needed.need(x)
assert needed.resources() == [x]
def test_mode_shortcut():
foo = Library('foo', '')
k = Resource(foo, 'k.js', debug='k-debug.js')
needed = NeededResources()
needed.need(k)
assert needed.resources() == [k]
needed = NeededResources(debug=True)
needed.need(k)
assert len(needed.resources()) == 1
assert needed.resources()[0].relpath == 'k-debug.js'
def test_mode_inherit_dependency_nr():
foo = Library('foo', '')
k = Resource(foo, 'k.js')
l_debug = Resource(foo, 'l-debug.js')
assert l_debug.dependency_nr == 0
l = Resource(foo, 'l.js', debug=l_debug, depends=[k])
assert l_debug.dependency_nr == 1
def test_rollup():
foo = Library('foo', '')
b1 = Resource(foo, 'b1.js')
b2 = Resource(foo, 'b2.js')
giant = Resource(foo, 'giant.js', supersedes=[b1, b2])
needed = NeededResources(rollup=True)
needed.need(b1)
needed.need(b2)
assert needed.resources() == [giant]
def test_rollup_cannot():
foo = Library('foo', '')
b1 = Resource(foo, 'b1.js')
b2 = Resource(foo, 'b2.js')
giant = Resource(foo, 'giant.js', supersedes=[b1, b2])
needed = NeededResources(rollup=True)
needed.need(b1)
assert needed.resources() == [b1]
assert giant not in needed.resources()
def test_rollup_larger():
foo = Library('foo', '')
c1 = Resource(foo, 'c1.css')
c2 = Resource(foo, 'c2.css')
c3 = Resource(foo, 'c3.css')
giant = Resource(foo, 'giant.css', supersedes=[c1, c2, c3])
needed = NeededResources(rollup=True)
needed.need(c1)
assert needed.resources() == [c1]
needed.need(c2)
assert needed.resources() == [c1, c2]
needed.need(c3)
assert needed.resources() == [giant]
def test_rollup_size_competing():
foo = Library('foo', '')
d1 = Resource(foo, 'd1.js')
d2 = Resource(foo, 'd2.js')
d3 = Resource(foo, 'd3.js')
giant = Resource(foo, 'giant.js', supersedes=[d1, d2])
giant_bigger = Resource(foo, 'giant-bigger.js',
supersedes=[d1, d2, d3])
needed = NeededResources(rollup=True)
needed.need(d1)
needed.need(d2)
needed.need(d3)
assert needed.resources() == [giant_bigger]
assert giant not in needed.resources()
def test_rollup_modes():
foo = Library('foo', '')
f1 = Resource(foo, 'f1.js', debug='f1-debug.js')
f2 = Resource(foo, 'f2.js', debug='f2-debug.js')
giantf = Resource(foo, 'giantf.js', supersedes=[f1, f2],
debug='giantf-debug.js')
needed = NeededResources(rollup=True)
needed.need(f1)
needed.need(f2)
assert needed.resources() == [giantf]
needed = NeededResources(rollup=True, debug=True)
needed.need(f1)
needed.need(f2)
assert needed.resources() == [giantf.modes['debug']]
def test_rollup_without_mode():
foo = Library('foo', '')
h1 = Resource(foo, 'h1.js', debug='h1-debug.js')
h2 = Resource(foo, 'h2.js', debug='h2-debug.js')
gianth = Resource(foo, 'gianth.js', supersedes=[h1, h2])
needed = NeededResources(resources=[h1, h2], rollup=True, debug=True)
# No debug mode is available for the rollup, so the plain rollup is used.
assert needed.resources() == [gianth]
def test_rendering():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
assert needed.render() == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
def test_rendering_base_url():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
assert needed.render() == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
needed = NeededResources(base_url='http://localhost/static')
needed.need(y1)
assert needed.render() == '''\
<link rel="stylesheet" type="text/css" href="http://localhost/static/fanstatic/foo/b.css" />
<script type="text/javascript" src="http://localhost/static/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="http://localhost/static/fanstatic/foo/c.js"></script>'''
# The base_url has been set.
assert needed.has_base_url()
needed.set_base_url('foo')
# The base_url can only be set once.
assert needed._base_url == 'http://localhost/static'
def test_empty_base_url_and_publisher_signature():
''' When the base_url is not set and the publisher_signature is an empty string,
render a URL without them. '''
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
needed = NeededResources(publisher_signature='')
needed.need(x1)
assert needed.render() == '''\
<script type="text/javascript" src="/foo/a.js"></script>'''
def test_rendering_base_url_assign():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
needed.set_base_url('http://localhost/static')
assert needed.render() == '''\
<link rel="stylesheet" type="text/css" href="http://localhost/static/fanstatic/foo/b.css" />
<script type="text/javascript" src="http://localhost/static/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="http://localhost/static/fanstatic/foo/c.js"></script>'''
def test_library_url_default_publisher_signature():
foo = Library('foo', '')
needed = NeededResources()
assert needed.library_url(foo) == '/fanstatic/foo'
def test_library_url_publisher_signature():
foo = Library('foo', '')
needed = NeededResources(publisher_signature='waku')
assert needed.library_url(foo) == '/waku/foo'
def test_library_url_base_url():
foo = Library('foo', '')
needed = NeededResources(base_url="http://example.com/something")
assert (needed.library_url(foo) ==
'http://example.com/something/fanstatic/foo')
def test_library_url_script_name():
foo = Library('foo', '')
needed = NeededResources(script_name='/root')
assert needed.library_url(foo) == '/root/fanstatic/foo'
def test_library_url_script_name_base_url():
foo = Library('foo', '')
needed = NeededResources(
script_name='/root', base_url="http://example.com/something")
# base_url is set so script_name should be ignored
assert (needed.library_url(foo) ==
'http://example.com/something/fanstatic/foo')
def test_library_url_version_hashing(tmpdir):
foo = Library('foo', tmpdir.strpath)
needed = NeededResources(versioning=True)
url = needed.library_url(foo)
assert re.match(r'/fanstatic/foo/\+version\+[0-9T:.-]*$', url)
# The md5 based version URL is available through the
# `versioning_use_md5` parameter:
needed = NeededResources(versioning=True, versioning_use_md5=True)
md5_url = needed.library_url(foo)
assert url != md5_url
# If the Library defines a version, the version is used.
bar = Library('bar', '', version='1')
assert needed.library_url(bar) == '/fanstatic/bar/+version+1'
def test_library_url_hashing_norecompute(tmpdir):
foo = Library('foo', tmpdir.strpath)
needed = NeededResources(versioning=True, recompute_hashes=False)
url = needed.library_url(foo)
# now create a file
resource = tmpdir.join('test.js')
resource.write('/* test */')
# since we're not re-computing hashes, the hash in the URL won't change
assert needed.library_url(foo) == url
def test_library_url_hashing_recompute(tmpdir):
foo = Library('foo', tmpdir.strpath)
needed = NeededResources(versioning=True, recompute_hashes=True)
url = needed.library_url(foo)
# now create a file
resource = tmpdir.join('test.js')
time.sleep(0.02)
# Sleep extra long on filesystems that report in seconds
# instead of milliseconds.
if os.path.getmtime(os.curdir).is_integer():
time.sleep(1)
resource.write('/* test */')
# the hash is recalculated now, so it changes
assert needed.library_url(foo) != url
def test_html_insert():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
html = "<html><head>something more</head></html>"
# XXX where is extraneous space coming from? misguided attempt at
# indentation?
assert needed.render_into_html(html) == '''\
<html><head>
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>
something more</head></html>'''
def test_html_insert_head_with_attributes():
# ticket 72: .need() broken when <head> tag has attributes
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
needed = NeededResources(resources=[x1])
html = '<html><head profile="http://example.org">something</head></html>'
assert needed.render_into_html(html) == '''\
<html><head profile="http://example.org">
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
something</head></html>'''
def test_html_top_bottom():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources()
needed.need(y1)
top, bottom = needed.render_topbottom()
assert top == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
assert bottom == ''
def test_html_top_bottom_set_bottom():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources(bottom=True)
needed.need(y1)
top, bottom = needed.render_topbottom()
assert top == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
assert bottom == ''
def test_html_top_bottom_force_bottom():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
needed = NeededResources(bottom=True, force_bottom=True)
needed.need(y1)
top, bottom = needed.render_topbottom()
assert top == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />'''
assert bottom == '''\
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
def test_html_bottom_safe():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
y2 = Resource(foo, 'y2.js', bottom=True)
needed = NeededResources()
needed.need(y1)
needed.need(y2)
top, bottom = needed.render_topbottom()
assert top == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/y2.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
assert bottom == ''
needed = NeededResources(bottom=True)
needed.need(y1)
needed.need(y2)
top, bottom = needed.render_topbottom()
assert top == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
assert bottom == '''\
<script type="text/javascript" src="/fanstatic/foo/y2.js"></script>'''
needed = NeededResources(bottom=True, force_bottom=True)
needed.need(y1)
needed.need(y2)
top, bottom = needed.render_topbottom()
assert top == '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />'''
assert bottom == '''\
<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/y2.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script>'''
# XXX add sanity checks: cannot declare something bottom safe while
# what it depends on isn't bottom safe
def test_html_bottom_safe_used_with_minified():
foo = Library('foo', '')
a = Resource(foo, 'a.js', minified='a-minified.js', bottom=True)
needed = NeededResources(minified=True, bottom=True)
needed.need(a)
top, bottom = needed.render_topbottom()
assert top == ''
assert bottom == ('<script type="text/javascript" '
'src="/fanstatic/foo/a-minified.js"></script>')
def test_top_bottom_insert():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
html = "<html><head>rest of head</head><body>rest of body</body></html>"
needed = NeededResources(bottom=True, force_bottom=True)
needed.need(y1)
assert needed.render_topbottom_into_html(html) == '''\
<html><head>
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />
rest of head</head><body>rest of body<script type="text/javascript" src="/fanstatic/foo/a.js"></script>
<script type="text/javascript" src="/fanstatic/foo/c.js"></script></body></html>'''
def test_inclusion_renderers():
assert sorted(
[(order, key) for key, (order, _) in inclusion_renderers.items()]) == [
(10, '.css'), (20, '.js'), (30, '.ico')]
_, renderer = inclusion_renderers['.js']
assert renderer('http://localhost/script.js') == (
'<script type="text/javascript" src="http://localhost/script.js"></script>')
def test_register_inclusion_renderer():
foo = Library('foo', '')
with pytest.raises(UnknownResourceExtensionError):
# The renderer for '.unknown' is not yet defined.
Resource(foo, 'nothing.unknown')
def render_unknown(url):
return '<link rel="unknown" href="%s" />' % url
register_inclusion_renderer('.unknown', render_unknown)
a = Resource(foo, 'nothing.unknown')
needed = NeededResources()
needed.need(a)
assert needed.render() == ('<link rel="unknown" href="/fanstatic/foo/nothing.unknown" />')
def test_registered_inclusion_renderers_in_order():
foo = Library('foo', '')
def render_unknown(url):
return '<unknown href="%s"/>' % url
register_inclusion_renderer('.later', render_unknown, 50)
a = Resource(foo, 'nothing.later')
b = Resource(foo, 'something.js')
c = Resource(foo, 'something.css')
d = Resource(foo, 'something.ico')
needed = NeededResources()
needed.need(a)
needed.need(b)
needed.need(c)
needed.need(d)
assert needed.render() == """\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/something.css" />
<script type="text/javascript" src="/fanstatic/foo/something.js"></script>
<link rel="shortcut icon" type="image/x-icon" href="/fanstatic/foo/something.ico"/>
<unknown href="/fanstatic/foo/nothing.later"/>"""
register_inclusion_renderer('.sooner', render_unknown, 5)
e = Resource(foo, 'nothing.sooner')
needed.need(e)
assert needed.render() == """\
<unknown href="/fanstatic/foo/nothing.sooner"/>
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/something.css" />
<script type="text/javascript" src="/fanstatic/foo/something.js"></script>
<link rel="shortcut icon" type="image/x-icon" href="/fanstatic/foo/something.ico"/>
<unknown href="/fanstatic/foo/nothing.later"/>"""
register_inclusion_renderer('.between', render_unknown, 25)
f = Resource(foo, 'nothing.between')
needed.need(f)
assert needed.render() == """\
<unknown href="/fanstatic/foo/nothing.sooner"/>
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/something.css" />
<script type="text/javascript" src="/fanstatic/foo/something.js"></script>
<unknown href="/fanstatic/foo/nothing.between"/>
<link rel="shortcut icon" type="image/x-icon" href="/fanstatic/foo/something.ico"/>
<unknown href="/fanstatic/foo/nothing.later"/>"""
def test_custom_renderer_for_resource():
foo = Library('foo', '')
from fanstatic.core import render_print_css
a = Resource(foo, 'printstylesheet.css', renderer=render_print_css)
needed = NeededResources()
needed.need(a)
assert needed.render() == """\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/printstylesheet.css" media="print" />"""
def render_unknown(url):
return '<unknown href="%s"/>' % url
b = Resource(foo, 'nothing.unknown', renderer=render_unknown)
needed.need(b)
assert needed.render() == """\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/printstylesheet.css" media="print" />
<unknown href="/fanstatic/foo/nothing.unknown"/>"""
def test_custom_renderer_keep_together():
foo = Library('foo', '')
def render_print_css(url):
return ('<link rel="stylesheet" type="text/css" href="%s" media="print"/>' %
url)
a = Resource(foo, 'printstylesheet.css', renderer=render_print_css)
b = Resource(foo, 'regular.css')
c = Resource(foo, 'something.js')
needed = NeededResources()
needed.need(a)
needed.need(b)
needed.need(c)
assert needed.render() == """\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/printstylesheet.css" media="print"/>
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/regular.css" />
<script type="text/javascript" src="/fanstatic/foo/something.js"></script>"""
def test_resource_subclass_render():
foo = Library('foo', '')
class MyResource(Resource):
def render(self, library_url):
return '<myresource reference="%s/%s"/>' % (library_url, self.relpath)
a = MyResource(foo, 'printstylesheet.css')
needed = NeededResources()
needed.need(a)
assert needed.render() == """\
<myresource reference="/fanstatic/foo/printstylesheet.css"/>"""
def test_clear():
foo = Library('foo', '')
a1 = Resource(foo, 'a1.js')
a2 = Resource(foo, 'a2.js', depends=[a1])
a3 = Resource(foo, 'a3.js', depends=[a2])
a4 = Resource(foo, 'a4.js', depends=[a1])
a5 = Resource(foo, 'a5.js', depends=[a4, a3])
needed = NeededResources()
needed.need(a1)
needed.need(a2)
needed.need(a3)
assert needed.resources() == [a1, a2, a3]
# For some reason, for example because an error page needs to be rendered,
# the currently needed resources need to be cleared.
needed.clear()
assert needed.resources() == []
needed.need(a4)
needed.need(a5)
assert needed.resources() == [a1, a2, a4, a3, a5]
def test_convenience_clear():
foo = Library('foo', '')
x1 = Resource(foo, 'a.js')
x2 = Resource(foo, 'b.css')
y1 = Resource(foo, 'c.js', depends=[x1, x2])
z1 = Resource(foo, 'd.js')
z2 = Resource(foo, 'e.js', depends=[z1, x1])
needed = init_needed()
y1.need()
assert needed.resources() == [x2, x1, y1]
# For some reason, for example because an error page needs to be rendered,
# the currently needed resources need to be cleared.
clear_needed()
assert needed.resources() == []
z2.need()
assert needed.resources() == [x1, z1, z2]
def test_check_resource_dependencies():
foo = Library('foo', '')
r1 = Resource(foo, 'r1.css')
r2 = Resource(foo, 'r2.css')
r3 = Resource(foo, 'r3.css', depends=[r1, r2])
# https://bitbucket.org/fanstatic/fanstatic/issue/63
# If a resource is a mode (debug, minified) of a resource, its
# dependencies should be the same or a subset of the dependencies that
# this mode replaces.
with pytest.raises(ModeResourceDependencyError):
Resource(foo, 'r4.css', depends=[r1], minified=r3)
def test_normalize_string():
foo = Library('foo', '')
r1 = Resource(foo, 'r1.css', minified='r1.min.css')
assert isinstance(r1.modes['minified'], Resource)
def test_sort_group_per_renderer():
foo = Library('foo', '')
a_js = Resource(foo, 'a.js')
b_css = Resource(foo, 'b.css')
c_js = Resource(foo, 'c.js')
a1_js = Resource(foo, 'a1.js', depends=[b_css])
needed = NeededResources()
needed.need(a_js)
needed.need(b_css)
needed.need(c_js)
needed.need(a1_js)
assert needed.resources() == [b_css, a_js, c_js, a1_js]
def test_sort_group_per_library():
foo = Library('foo', '')
bar = Library('bar', '')
e = Resource(foo, 'e.js')
d = Resource(foo, 'd.js', depends=[e])
c = Resource(bar, 'c.js', depends=[e])
b = Resource(bar, 'b.js')
a = Resource(bar, 'a.js', depends=[c])
needed = NeededResources()
needed.need(a)
needed.need(b)
needed.need(c)
needed.need(d)
needed.need(e)
assert needed.resources() == [e, d, b, c, a]
def test_sort_library_by_name():
b_lib = Library('b_lib', '')
a_lib = Library('a_lib', '')
a_a = Resource(a_lib, 'a.js')
a_b = Resource(b_lib, 'a.js')
needed = NeededResources()
needed.need(a_b)
needed.need(a_a)
assert needed.resources() == [a_a, a_b]
def test_sort_resources_libraries_together():
K = Library('K', '')
L = Library('L', '')
M = Library('M', '')
N = Library('N', '')
k1 = Resource(K, 'k1.js')
l1 = Resource(L, 'l1.js')
m1 = Resource(M, 'm1.js', depends=[k1])
m2 = Resource(M, 'm2.js', depends=[l1])
n1 = Resource(N, 'n1.js', depends=[m1])
needed = NeededResources()
needed.need(m1)
needed.need(m2)
# sort_resources makes an efficient ordering, grouping m1 and m2 together
# after their dependencies (they are in the same library)
assert needed.resources() == [k1, l1, m1, m2]
needed = NeededResources()
needed.need(n1)
needed.need(m2)
# the order is unaffected by the ordering of inclusions
assert needed.resources() == [k1, l1, m1, m2, n1]
def test_sort_resources_library_sorting():
# a complicated example that makes sure libraries are sorted
# correctly to obey ordering constraints but still groups them
X = Library('X', '')
Y = Library('Y', '')
Z = Library('Z', '')
a = Resource(X, 'a.js')
b = Resource(Z, 'b.js', depends=[a])
c = Resource(Y, 'c.js')
c1 = Resource(Y, 'c1.js', depends=[c])
c2 = Resource(Y, 'c2.js', depends=[c1])
d = Resource(Z, 'd.js', depends=[c])
e = Resource(Z, 'e.js')
needed = NeededResources()
needed.need(b)
needed.need(c2)
needed.need(d)
needed.need(e)
assert needed.resources() == [a, c, c1, c2, e, b, d]
def test_sort_resources_library_sorting_by_name():
# these libraries are all at the same level so should be sorted by name
X = Library('X', '')
Y = Library('Y', '')
Z = Library('Z', '')
a = Resource(X, 'a.js')
b = Resource(Y, 'b.js')
c = Resource(Z, 'c.js')
needed = NeededResources()
needed.need(a)
needed.need(b)
needed.need(c)
assert needed.resources() == [a, b, c]
def test_sort_resources_library_sorting_by_name_deeper():
X = Library('X', '')
Y = Library('Y', '')
Z = Library('Z', '')
# only X and Z will be at the same level now
a = Resource(X, 'a.js')
c = Resource(Z, 'c.js')
b = Resource(Y, 'b.js', depends=[a, c])
needed = NeededResources()
needed.need(b)
assert needed.resources() == [a, c, b]
def test_library_nr():
X = Library('X', '')
Y = Library('Y', '')
Z = Library('Z', '')
# only X and Z will be at the same level now
a = Resource(X, 'a.js')
c = Resource(Z, 'c.js')
b = Resource(Y, 'b.js', depends=[a, c])
X.init_library_nr()
Y.init_library_nr()
Z.init_library_nr()
assert a.library.library_nr == 0
assert c.library.library_nr == 0
assert b.library.library_nr == 1
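# library_nr is, in effect, a library's depth in the inter-library dependency
# graph: X and Z depend on no other library, so they get 0, while Y (whose
# resource depends on resources from both X and Z) ends up one level deeper.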
def test_library_dependency_cycles():
A = Library('A', '')
B = Library('B', '')
a1 = Resource(A, 'a1.js')
b1 = Resource(B, 'b1.js')
a2 = Resource(A, 'a2.js', depends=[b1])
# This definition would create a library dependency cycle if permitted.
with pytest.raises(LibraryDependencyCycleError):
b2 = Resource(B, 'b2.js', depends=[a1])
# This is an example of an indirect library dependency cycle.
C = Library('C', '')
D = Library('D', '')
E = Library('E', '')
c1 = Resource(C, 'c1.js')
d1 = Resource(D, 'd1.js', depends=[c1])
d2 = Resource(D, 'd2.js')
e1 = Resource(E, 'e1.js', depends=[d2])
# ASCII ART
#
# C E D
#
# c1 <--------- d1
#
# c2 --> e1 --> d2
#
with pytest.raises(LibraryDependencyCycleError):
c2 = Resource(C, 'c2.js', depends=[e1])
def test_sort_resources_topological():
foo = Library('foo', '')
a1 = Resource(foo, 'a1.js')
a2 = Resource(foo, 'a2.js', depends=[a1])
a3 = Resource(foo, 'a3.js', depends=[a2])
a5 = Resource(foo, 'a5.js', depends=[a3])
assert sort_resources_topological([a5, a3, a1, a2]) == [a1, a2, a3, a5]
def test_bundle():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css')
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
resources = bundle_resources(needed.resources())
assert len(resources) == 1
bundle = resources[0]
assert bundle.resources() == [a, b]
def test_bundle_dont_bundle_at_the_end():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css')
c = Resource(foo, 'c.css', dont_bundle=True)
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
needed.need(c)
resources = bundle_resources(needed.resources())
assert len(resources) == 2
assert resources[0].resources() == [a, b]
assert resources[-1] is c
def test_bundle_dont_bundle_at_the_start():
foo = Library('foo', '')
a = Resource(foo, 'a.css', dont_bundle=True)
b = Resource(foo, 'b.css')
c = Resource(foo, 'c.css')
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
needed.need(c)
resources = bundle_resources(needed.resources())
assert len(resources) == 2
assert resources[0] is a
assert resources[1].resources() == [b, c]
def test_bundle_dont_bundle_in_the_middle():
# now construct a scenario where a dont_bundle resource is in the way
# of bundling
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', dont_bundle=True)
c = Resource(foo, 'c.css')
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
needed.need(c)
resources = needed.resources()
assert len(resources) == 3
assert resources[0] is a
assert resources[1] is b
assert resources[2] is c
def test_bundle_resources_bottomsafe():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = NeededResources(resources=[a,b], bundle=True)
assert needed.render_topbottom() == ('''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/+bundle+a.css+b.css" />''', '')
needed = NeededResources(resources=[a,b], bundle=True, bottom=True)
assert needed.render_topbottom() == ('''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/a.css" />''', '''\
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" />''')
def test_bundle_different_renderer():
# resources with different renderers aren't bundled
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.js')
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
resources = needed.resources()
assert len(resources) == 2
assert resources[0] is a
assert resources[1] is b
def test_bundle_different_library():
# resources with different libraries aren't bundled
l1 = Library('l1', '')
l2 = Library('l2', '')
a = Resource(l1, 'a.js')
b = Resource(l2, 'b.js')
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
resources = needed.resources()
assert len(resources) == 2
assert resources[0] is a
assert resources[1] is b
def test_bundle_different_directory():
# resources with different directories aren't bundled
foo = Library('foo', '')
a = Resource(foo, 'first/a.css')
b = Resource(foo, 'second/b.css')
needed = NeededResources(bundle=True)
needed.need(a)
needed.need(b)
resources = needed.resources()
assert len(resources) == 2
assert resources[0] is a
assert resources[1] is b
def test_bundle_empty_list():
# we can successfully bundle an empty list of resources
needed = NeededResources(bundle=True)
resources = needed.resources()
assert resources == []
def test_bundle_single_entry():
# we can successfully bundle a single resource (it's not bundled though)
foo = Library('foo', '')
a = Resource(foo, 'a.js')
needed = NeededResources(bundle=True)
needed.need(a)
resources = needed.resources()
assert resources == [a]
def test_bundle_single_dont_bundle_entry():
foo = Library('foo', '')
a = Resource(foo, 'a.js', dont_bundle=True)
needed = NeededResources(bundle=True)
needed.need(a)
resources = needed.resources()
assert resources == [a]
def test_inter_library_dependencies_ordering():
lib1 = Library('lib1', '')
lib2 = Library('lib2', '')
lib3 = Library('lib3', '')
lib4 = Library('lib4', '')
js1 = Resource(lib1, 'js1.js')
js2 = Resource(lib2, 'js2.js', depends=[js1])
js3 = Resource(lib3, 'js3.js', depends=[js2])
style1 = Resource(lib3, 'style1.css')
style2 = Resource(lib4, 'style2.css', depends=[style1])
needed = NeededResources()
needed.need(js3)
needed.need(style2)
resources = needed.resources()
assert resources == [style1, style2, js1, js2, js3]
def test_library_ordering_bug():
jquery_lib = Library('jquery', '')
jqueryui_lib = Library('jqueryui', '')
obviel_lib = Library('obviel', '')
bread_lib = Library('bread', '')
app_lib = Library('app', '')
jquery = Resource(jquery_lib, 'jquery.js')
jqueryui = Resource(jqueryui_lib, 'jqueryui.js', depends=[jquery])
obviel = Resource(obviel_lib, 'obviel.js', depends=[jquery])
obviel_forms = Resource(obviel_lib, 'obviel_forms.js',
depends=[obviel])
obviel_datepicker = Resource(obviel_lib, 'obviel_datepicker.js',
depends=[obviel_forms, jqueryui])
vtab = Resource(bread_lib, 'vtab.js', depends=[jqueryui])
tabview = Resource(bread_lib, 'tabview.js', depends=[obviel, vtab])
bread = Resource(bread_lib, 'bread.js', depends=[tabview, obviel_forms])
app = Resource(app_lib, 'app.js', depends=[bread, obviel_datepicker])
needed = NeededResources()
needed.need(app)
resources = needed.resources()
for resource in resources:
six.print_(resource, resource.library.library_nr)
assert resources == [jquery, jqueryui, obviel, obviel_forms,
obviel_datepicker, vtab, tabview, bread, app]
#assert resources == [obviel, forms, forms_autocomplete, tabview, bread,
# zorgdas]
# XXX tests for hashed resources when this is enabled. Needs some plausible
# directory to test for hashes
# XXX better error reporting if unknown extensions are used
|
|
from __future__ import print_function
import os
import json
import unittest
import abc
import riotwatcher
import data_path
default_report_freq = 100
default_report_callback = print
default_summoner_id = 30890339
default_n_ids_min = 10
default_summoner_ids_directory = data_path.summoner_ids_dir
default_champions_usage_directory = data_path.champions_usage_dir
n_summoners_key = 'n_summoners'
name_key = 'name'
class_key = 'class'
initial_summoner_id_key = 'initial_summoner_id'
n_summoners_required_key = 'n_summoners_required'
tier_key = 'tier'
class DataCollector:
__metaclass__ = abc.ABCMeta
def __init__(self, riot=None, report_callback=default_report_callback, report_freq=default_report_freq):
if riot is None:
riot = riotwatcher.RiotWatcher()
self.riot = riot
if report_callback is None:
report_callback = default_report_callback
self.report_callback = report_callback
self.report_freq = report_freq
class SummonerIdsCollector(DataCollector):
def __init__(self, riot=None, report_callback=default_report_callback, report_freq=default_report_freq):
DataCollector.__init__(self, riot=riot, report_callback=report_callback, report_freq=report_freq)
def match_id_to_summoner_ids(self, match_id):
self.riot.wait()
try:
match_detail = self.riot.get_match(match_id)
return [participantIdentity['player']['summonerId'] for participantIdentity in
match_detail['participantIdentities']]
except riotwatcher.LoLException as e:
print('LoLException: ' + str(e) + ', in match_id_to_summoner_ids with match_id = ' + str(match_id))
return []
def summoner_id_to_match_ids(self, summoner_id):
self.riot.wait()
try:
match_history = self.riot.get_match_list(summoner_id, ranked_queues=['RANKED_SOLO_5x5'], seasons=['SEASON2015'])
# return [match_summary['matchId'] for match_summary in match_history['matches'] if match_summary['season']=='SEASON2015']
return [match_summary['matchId'] for match_summary in match_history['matches']]
except riotwatcher.LoLException as e:
print('LoLException: ' + str(e) + ', in summoner_id_to_match_ids with summoner_id = ' + str(summoner_id))
return []
def collect_summoner_ids(self, n_ids_min, initial_summoner_id):
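# Breadth-first crawl over the "played a ranked match together" graph:
# alternately expand pending summoner ids into match ids and pending match
# ids back into newly seen summoner ids, reporting progress every
# report_freq summoners, until at least n_ids_min ids are collected or
# nothing is left to expand.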
summoner_ids = set()
match_ids = set()
pending_summoner_ids = [initial_summoner_id]
pending_match_ids = []
while len(summoner_ids) < n_ids_min and len(pending_summoner_ids) > 0:
for s_id in pending_summoner_ids:
for m_id in self.summoner_id_to_match_ids(s_id):
if m_id not in match_ids:
match_ids.add(m_id)
pending_match_ids.append(m_id)
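# A ranked 5x5 match involves ten players, so this many matches should
# yield well over n_ids_min candidate ids even allowing for duplicates;
# stop requesting more match history once that point is reached.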
if 5 * len(match_ids) > n_ids_min:
break
del pending_summoner_ids[:]
for m_id in pending_match_ids:
for s_id in self.match_id_to_summoner_ids(m_id):
if s_id not in summoner_ids:
summoner_ids.add(s_id)
if len(summoner_ids) % self.report_freq == 0:
self.report_callback('Collecting summoner ids: {}/{} done'.format(len(summoner_ids), n_ids_min))
pending_summoner_ids.append(s_id)
if len(summoner_ids) > n_ids_min:
break
del pending_match_ids[:]
return summoner_ids
def make_summoner_ids(self, initial_id=default_summoner_id, n_min=default_n_ids_min, name=None):
if name is None:
name = 'init{}min{}'.format(initial_id, n_min)
data = self.collect_summoner_ids(n_min, initial_id)
data = list(data)
summoner_ids = SummonerIds.Cons(initial_id, n_min, name, data)
return summoner_ids
def make_summoner_ids_master(self, name=None):
if name is None:
name = 'master'
self.riot.wait()
resp = self.riot.get_master()
data = [int(entry['playerOrTeamId']) for entry in resp['entries']]
summoner_ids = SummonerIds.Cons_tier('master', name, data)
return summoner_ids
#endclass SummonerIdsCollector
class DataWrapper:
__metaclass__ = abc.ABCMeta
def __init__(self, infos, data, name):
self.infos = infos
self.data = data
self.name = name
@staticmethod
@abc.abstractmethod
def default_directory():
raise NotImplementedError
def get_info(self, key):
return self.infos.get(key)
@property
def get_n_summoners(self):
return self.get_info(n_summoners_key)
class SummonerIds(DataWrapper):
def __init__(self, dic, data):
DataWrapper.__init__(self, dic, data, dic['name'])
@staticmethod
def default_directory():
return data_path.summoner_ids_dir
@classmethod
def Cons(cls, initial_summoner_id, n_summoners_required, name, data):
d = {class_key: cls.__name__,
n_summoners_key: len(data),
name_key: name,
initial_summoner_id_key: initial_summoner_id,
n_summoners_required_key: n_summoners_required}
si = SummonerIds(d, data)
return si
@classmethod
def Cons_tier(cls, tier, name, data, n_summoners_required=None):
d = {class_key: cls.__name__,
n_summoners_key: len(data),
name_key: name,
tier_key: tier}
if n_summoners_required is not None:
d[n_summoners_required_key] = n_summoners_required
si = SummonerIds(d, data)
return si
class DataFilesHandler:
def __init__(self, directory=None):
self.directory = directory
class UndefinedFilenameError(Exception):
pass
def decide_filename(self, default_name, default_directory,
enforce_name, enforce_directory, enforce_fullname):
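# Precedence: an explicit enforce_fullname wins outright; otherwise the
# directory is enforce_directory, else this handler's directory, else the
# wrapper's default directory, and the name is enforce_name, else the
# wrapper's default name. If either piece is still missing, raise
# UndefinedFilenameError.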
if enforce_fullname is not None:
return enforce_fullname
name = default_name
directory = default_directory
if enforce_name is not None:
name = enforce_name
if self.directory is not None:
directory = self.directory
if enforce_directory is not None:
directory = enforce_directory
if name is None or directory is None:
raise self.UndefinedFilenameError
return os.path.join(directory, name)
def dump(self, datawrapper, enforce_name=None, enforce_directory=None, enforce_fullname=None):
filename = self.decide_filename(datawrapper.name, datawrapper.default_directory,
enforce_name, enforce_directory, enforce_fullname)
s1 = json.dumps(datawrapper.infos)
s2 = json.dumps(datawrapper.data)
with open(filename, 'w') as f:
f.write(s1+'\n'+s2)
return filename
def readlines(self, name=None, enforce_directory=None, fullname=None):
filename = self.decide_filename(None, None, name, enforce_directory, fullname)
with open(filename, 'r') as f:
s1 = f.readline()
s2 = f.readline()
return s1, s2
def load_cls(self, cls, name=None, enforce_directory=None, fullname=None):
s1, s2 = self.readlines(name=name, enforce_directory=enforce_directory, fullname=fullname)
dic = json.loads(s1)
data = json.loads(s2)
return cls(dic, data)
def load_infos(self, name=None, enforce_directory=None, fullname=None):
s1, s2 = self.readlines(name=name, enforce_directory=enforce_directory, fullname=fullname)
return json.loads(s1)
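# A minimal sketch of the filename precedence implemented by
# DataFilesHandler.decide_filename (all paths below are hypothetical):
# an enforce_fullname wins outright, otherwise the name falls back from
# enforce_name to the default, and the directory falls back from
# enforce_directory to the handler's own directory to the default.
def _demo_decide_filename():
    handler = DataFilesHandler(directory='/tmp/handler_dir')
    # enforce_fullname short-circuits everything else.
    assert handler.decide_filename('a', '/tmp/default_dir',
                                   None, None, '/tmp/full/path') == '/tmp/full/path'
    # enforce_name plus the handler's directory override the defaults.
    assert handler.decide_filename('a', '/tmp/default_dir',
                                   'b', None, None) == os.path.join('/tmp/handler_dir', 'b')
    # enforce_directory overrides the handler's directory.
    assert handler.decide_filename('a', '/tmp/default_dir',
                                   None, '/tmp/forced_dir', None) == os.path.join('/tmp/forced_dir', 'a')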
def make_ClsFilesHandler(cls):
class ClsFilesHandler(DataFilesHandler):
def __init__(self, directory=None):
DataFilesHandler.__init__(self, directory=directory)
if self.directory is None:
self.directory = cls.default_directory()
def load(self, name=None, enforce_directory=None, fullname=None):
return self.load_cls(cls, name=name, enforce_directory=enforce_directory, fullname=fullname)
return ClsFilesHandler
SummonerIdsFilesHandler = make_ClsFilesHandler(SummonerIds)
def collect_master_summoner_ids():
# riot = riotwatcher.RiotWatcher()
# riot.wait()
# resp = riot.get_master()
# ids = [int(entry['playerOrTeamId']) for entry in resp['entries']]
# print(len(ids))
# print(ids)
# with open(data_path.summoner_ids_master, 'w') as f:
# json.dump(ids, f)
collector = SummonerIdsCollector()
summoner_ids = collector.make_summoner_ids_master()
h = SummonerIdsFilesHandler()
h.dump(summoner_ids)
class TestCollectSummonerIds(unittest.TestCase):
name = 'TestCollectSummonerIds'
reports = []
def send_report(self, s):
self.reports.append(s)
def test(self):
n_min = 30
self.reports = []
collector = SummonerIdsCollector(report_freq=10, report_callback=self.send_report)
summoner_ids = collector.make_summoner_ids(n_min=n_min, name=self.name)
self.assertGreaterEqual(summoner_ids.get_info('n_summoners'), n_min)
for i in [1,2,3]:
j = 10*i
self.assertEqual(self.reports.pop(0), 'Collecting summoner ids: {}/{} done'.format(j, n_min))
class TestSummonerIdsFilesHandler(unittest.TestCase):
name = 'test'
def test(self):
collector = SummonerIdsCollector()
summoner_ids = collector.make_summoner_ids(name=self.name)
h = SummonerIdsFilesHandler()
h.dump(summoner_ids)
summoner_ids1 = h.load(self.name)
self.assertEqual(summoner_ids.infos, summoner_ids1.infos)
self.assertEqual(summoner_ids.data, summoner_ids1.data)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import shutil
import yaml
METADATA_DIR_NAME = '.catkin_tools'
METADATA_README_TEXT = """\
# Catkin Tools Metadata
This directory was generated by catkin_tools and it contains persistent
configuration information used by the `catkin` command and its sub-commands.
Each subdirectory contains a set of persistent configuration options for
separate "profiles." The default profile is called `default`. If another
profile is desired, it can be described in the `profiles.yaml` file in this
directory.
Please see the catkin_tools documentation before editing any files in this
directory. Most actions can be performed with the `catkin` command-line
program.
"""
PROFILES_YML_FILE_NAME = 'profiles.yaml'
DEFAULT_PROFILE_NAME = 'default'
def get_metadata_root_path(workspace_path):
"""Construct the path to a root metadata directory.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:returns: The path to the metadata root directory or None if workspace_path isn't a string
:rtype: str or None
"""
# TODO: Should calling this without a string just be a fatal error?
if workspace_path is None:
return None
return os.path.join(workspace_path, METADATA_DIR_NAME)
def get_paths(workspace_path, profile_name, verb=None):
"""Get the path to a metadata directory and verb-specific metadata file.
Note: these paths are not guaranteed to exist. This function simply serves
to standardize where these files should be located.
:param workspace_path: The path to the root of a catkin workspace
:type workspace_path: str
:param profile_name: The catkin_tools metadata profile name
:type profile_name: str
:param verb: (optional) The catkin_tools verb with which this information is associated.
:returns: A tuple of the metadata directory and the verb-specific file path, if given
"""
# Get the root of the metadata directory
metadata_root_path = get_metadata_root_path(workspace_path)
# Get the active profile directory
metadata_path = os.path.join(metadata_root_path, profile_name) if profile_name else None
# Get the metadata for this verb
metadata_file_path = os.path.join(metadata_path, '%s.yaml' % verb) if profile_name and verb else None
return (metadata_path, metadata_file_path)
def find_enclosing_workspace(search_start_path):
"""Find a catkin workspace based on the existence of a catkin_tools
    metadata directory starting in the path given by search_start_path and
    traversing each parent directory until either finding such a directory or
    getting to the root of the filesystem.
    :param search_start_path: Directory which either is a catkin workspace or is
    contained in a catkin workspace
:returns: Path to the workspace if found, `None` if not found.
"""
while search_start_path:
# Check if marker file exists
candidate_path = get_metadata_root_path(search_start_path)
if os.path.exists(candidate_path) and os.path.isdir(candidate_path):
return search_start_path
# Update search path or end
(search_start_path, child_path) = os.path.split(search_start_path)
if len(child_path) == 0:
break
return None
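# A minimal sketch of the upward search performed by find_enclosing_workspace,
# using a temporary directory (the layout below is purely illustrative).
def _demo_find_enclosing_workspace():
    import tempfile
    ws = tempfile.mkdtemp()
    os.mkdir(os.path.join(ws, METADATA_DIR_NAME))
    nested = os.path.join(ws, 'src', 'some_pkg')
    os.makedirs(nested)
    # Starting from a nested directory, the search walks up to the marked root.
    assert find_enclosing_workspace(nested) == ws
    # With no marked ancestor, the walk stops at the filesystem root and
    # find_enclosing_workspace returns None.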
def init_metadata_root(workspace_path, reset=False):
"""Create or reset a catkin_tools metadata directory with no content in a given path.
:param workspace_path: The exact path to the root of a catkin workspace
:type workspace_path: str
:param reset: If true, clear the metadata directory of all information
:type reset: bool
"""
    # Make sure the workspace directory exists
if not os.path.exists(workspace_path):
raise IOError(
"Can't initialize Catkin workspace in path %s because it does "
"not exist." % (workspace_path))
# Check if the desired workspace is enclosed in another workspace
marked_workspace = find_enclosing_workspace(workspace_path)
if marked_workspace and marked_workspace != workspace_path:
raise IOError(
"Can't initialize Catkin workspace in path %s because it is "
"already contained in another workspace: %s." %
(workspace_path, marked_workspace))
# Construct the full path to the metadata directory
metadata_root_path = get_metadata_root_path(workspace_path)
# Check if a metadata directory already exists
if os.path.exists(metadata_root_path):
# Reset the directory if requested
if reset:
print("Deleting existing metadata from catkin_tools metadata directory: %s" % (metadata_root_path))
shutil.rmtree(metadata_root_path)
os.mkdir(metadata_root_path)
else:
# Create a new .catkin_tools directory
os.mkdir(metadata_root_path)
# Write the README file describing the directory
with open(os.path.join(metadata_root_path, 'README'), 'w') as metadata_readme:
metadata_readme.write(METADATA_README_TEXT)
def init_profile(workspace_path, profile_name, reset=False):
"""Initialize a profile directory in a given workspace.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:param profile_name: The catkin_tools metadata profile name to initialize
:type profile_name: str
"""
(profile_path, _) = get_paths(workspace_path, profile_name)
# Check if a profile directory already exists
if os.path.exists(profile_path):
# Reset the directory if requested
if reset:
print("Deleting existing profile from catkin_tools profile profile directory: %s" % (profile_path))
shutil.rmtree(profile_path)
os.mkdir(profile_path)
else:
# Create a new .catkin_tools directory
os.mkdir(profile_path)
def get_profile_names(workspace_path):
"""Get a list of profile names available to a given workspace.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:returns: A list of the available profile names in the given workspace
:rtype: list
"""
metadata_root_path = get_metadata_root_path(workspace_path)
if os.path.exists(metadata_root_path):
        directories = next(os.walk(metadata_root_path))[1]
return directories
return []
def remove_profile(workspace_path, profile_name):
"""Remove a profile by name.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:param profile_name: The catkin_tools metadata profile name to delete
:type profile_name: str
"""
(profile_path, _) = get_paths(workspace_path, profile_name)
if os.path.exists(profile_path):
shutil.rmtree(profile_path)
def set_active_profile(workspace_path, profile_name):
"""Set a profile in a given workspace to be active.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:param profile_name: The catkin_tools metadata profile name to activate
:type profile_name: str
"""
profiles_data = get_profiles_data(workspace_path)
profiles_data['active'] = profile_name
metadata_root_path = get_metadata_root_path(workspace_path)
profiles_yaml_file_path = os.path.join(metadata_root_path, PROFILES_YML_FILE_NAME)
with open(profiles_yaml_file_path, 'w') as profiles_file:
yaml.dump(profiles_data, profiles_file, default_flow_style=False)
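# The profiles.yaml written above ends up as a small YAML mapping, e.g.
# (illustrative content only):
#
#   active: my_profile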
def get_active_profile(workspace_path):
"""Get the active profile name from a workspace path.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:returns: The active profile name
:rtype: str
"""
profiles_data = get_profiles_data(workspace_path)
if 'active' in profiles_data:
return profiles_data['active']
return DEFAULT_PROFILE_NAME
def get_profiles_data(workspace_path):
"""Get the contents of the profiles file.
This file contains information such as the currently active profile.
:param workspace_path: The exact path to the root of a catkin_tools workspace
:type workspace_path: str
:returns: The contents of the root profiles file if it exists
:rtype: dict
"""
if workspace_path is not None:
metadata_root_path = get_metadata_root_path(workspace_path)
profiles_yaml_file_path = os.path.join(metadata_root_path, PROFILES_YML_FILE_NAME)
if os.path.exists(profiles_yaml_file_path):
with open(profiles_yaml_file_path, 'r') as profiles_file:
return yaml.load(profiles_file)
return {}
def get_metadata(workspace_path, profile, verb):
"""Get a python structure representing the metadata for a given verb.
:param workspace_path: The exact path to the root of a catkin workspace
:type workspace_path: str
:param profile: The catkin_tools metadata profile name
:type profile: str
:param verb: The catkin_tools verb with which this information is associated
:type verb: str
:returns: A python structure representing the YAML file contents (empty
dict if the file does not exist)
:rtype: dict
"""
(metadata_path, metadata_file_path) = get_paths(workspace_path, profile, verb)
if not os.path.exists(metadata_file_path):
return {}
with open(metadata_file_path, 'r') as metadata_file:
return yaml.load(metadata_file)
def update_metadata(workspace_path, profile, verb, new_data={}):
"""Update the catkin_tools verb metadata for a given profile.
:param workspace_path: The path to the root of a catkin workspace
:type workspace_path: str
:param profile: The catkin_tools metadata profile name
:type profile: str
:param verb: The catkin_tools verb with which this information is associated
:type verb: str
:param new_data: A python dictionary or array to write to the metadata file
:type new_data: dict
"""
(metadata_path, metadata_file_path) = get_paths(workspace_path, profile, verb)
# Make sure the metadata directory exists
init_metadata_root(workspace_path)
init_profile(workspace_path, profile)
    # Get the current metadata for this verb
data = get_metadata(workspace_path, profile, verb) or dict()
# Update the metadata for this verb
data.update(new_data)
with open(metadata_file_path, 'w') as metadata_file:
yaml.dump(data, metadata_file, default_flow_style=False)
def get_active_metadata(workspace_path, verb):
"""Get a python structure representing the metadata for a given verb.
:param workspace_path: The exact path to the root of a catkin workspace
:type workspace_path: str
:param verb: The catkin_tools verb with which this information is associated
:type verb: str
:returns: A python structure representing the YAML file contents (empty
dict if the file does not exist)
:rtype: dict
"""
    active_profile = get_active_profile(workspace_path)
    return get_metadata(workspace_path, active_profile, verb)
def update_active_metadata(workspace_path, verb, new_data={}):
"""Update the catkin_tools verb metadata for the active profile.
:param workspace_path: The path to the root of a catkin workspace
:type workspace_path: str
:param verb: The catkin_tools verb with which this information is associated
:type verb: str
:param new_data: A python dictionary or array to write to the metadata file
:type new_data: dict
"""
    active_profile = get_active_profile(workspace_path)
    update_metadata(workspace_path, active_profile, verb, new_data)
|
|
"""
Filename: calc_global_metric.py
Author: Damien Irving, irving.damien@gmail.com
Description: Calculate global metric
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import iris
import iris.analysis.cartography
from iris.experimental.equalise_cubes import equalise_attributes
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import convenient_universal as uconv
import timeseries
import grids
except ImportError:
    raise ImportError('Must run this script from within the ocean-analysis git repo')
# Define functions
history = []
def save_history(cube, field, filename):
"""Save the history attribute when reading the data.
(This is required because the history attribute differs between input files
    and is therefore deleted upon equalising attributes)
"""
history.append(cube.attributes['history'])
def read_optional(optional_file):
"""Read an optional file (e.g. area, basin) file."""
if optional_file:
if 'no_data' in optional_file:
cube = None
else:
cube = iris.load_cube(optional_file)
else:
cube = None
return cube
def set_attributes(inargs, data_cube, area_cube, sftlf_cube, areas_dict):
"""Set the attributes for the output cube."""
atts = data_cube.attributes
infile_history = {}
infile_history[inargs.infiles[0]] = history[0]
if area_cube:
infile_history[inargs.area_file] = area_cube.attributes['history']
if sftlf_cube:
infile_history[inargs.sftlf_file[0]] = sftlf_cube.attributes['history']
atts['history'] = gio.write_metadata(file_info=infile_history)
atts.update(areas_dict)
return atts
def calc_mean_anomaly(cube, sign, grid_areas):
"""Calculate the mean of all the positive or negative anomalies."""
if sign == 'positive':
new_mask = numpy.where((cube.data.mask == False) & (cube.data > 0.0), False, True)
elif sign == 'negative':
new_mask = numpy.where((cube.data.mask == False) & (cube.data < 0.0), False, True)
cube.data.mask = new_mask
cube = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
cube.remove_coord('longitude')
cube.remove_coord('latitude')
return cube
def calc_bulk_deviation(cube, grid_areas, atts):
"""Calculate bulk deviation metric.
Usually used for sea surface salinity
(e.g. Figure 3.21 of the IPCC AR5 report)
Definition: difference between the average positive
and average negative spatial anomaly.
"""
fldmean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
cube_spatial_anom = cube - fldmean
ave_pos_anom = calc_mean_anomaly(cube_spatial_anom.copy(), 'positive', grid_areas)
ave_neg_anom = calc_mean_anomaly(cube_spatial_anom.copy(), 'negative', grid_areas)
metric = ave_pos_anom - ave_neg_anom
metric.metadata = cube.metadata
return metric
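# A toy numpy illustration of the bulk-deviation definition above (made-up
# values, and without the area weighting used by calc_bulk_deviation): the
# metric is the mean positive spatial anomaly minus the mean negative one.
def _demo_bulk_deviation():
    field = numpy.array([34.0, 35.0, 36.0, 33.0])
    anomalies = field - field.mean()                 # [-0.5, 0.5, 1.5, -1.5]
    ave_pos_anom = anomalies[anomalies > 0].mean()   # 1.0
    ave_neg_anom = anomalies[anomalies < 0].mean()   # -1.0
    return ave_pos_anom - ave_neg_anom               # 2.0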
def get_area_weights(cube, area_cube):
"""Get area weights for averaging"""
if area_cube:
area_weights = uconv.broadcast_array(area_cube.data, [1, 2], cube.shape)
else:
if not cube.coord('latitude').has_bounds():
cube.coord('latitude').guess_bounds()
if not cube.coord('longitude').has_bounds():
cube.coord('longitude').guess_bounds()
area_weights = iris.analysis.cartography.area_weights(cube)
return area_weights
def calc_global_mean(cube, grid_areas, atts, remove_atts=True):
"""Calculate global mean."""
global_mean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
if remove_atts:
global_mean.remove_coord('longitude')
global_mean.remove_coord('latitude')
global_mean.attributes = atts
return global_mean
def calc_grid_deviation(cube, var, grid_areas, atts):
"""Calculate the global mean |x - x_spatial_mean|.
Doesn't calculate the spatial mean for P-E
(already centered on zero)
"""
metadata = cube.metadata
if var != 'precipitation_minus_evaporation_flux':
global_mean = calc_global_mean(cube, grid_areas, atts, remove_atts=False)
cube = cube - global_mean
abs_val = (cube ** 2) ** 0.5
abs_val.metadata = metadata
global_mean_abs = abs_val.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
global_mean_abs.remove_coord('longitude')
global_mean_abs.remove_coord('latitude')
global_mean_abs.attributes = atts
return global_mean_abs
def smooth_data(cube, smooth_type):
"""Apply temporal smoothing to a data cube."""
assert smooth_type in ['annual', 'annual_running_mean']
if smooth_type == 'annual_running_mean':
cube = cube.rolling_window('time', iris.analysis.MEAN, 12)
elif smooth_type == 'annual':
cube = timeseries.convert_to_annual(cube)
return cube
def area_info(area_cube, mask, selected_region):
"""Determine the area of the ocean and land."""
areas_dict = {}
regions = ['ocean', 'land']
regions.remove(selected_region)
area_cube.data = numpy.ma.asarray(area_cube.data)
area_cube.data.mask = mask
areas_dict["area_" + selected_region] = area_cube.data.sum()
inverted_mask = numpy.invert(mask)
area_cube.data.mask = inverted_mask
areas_dict["area_" + regions[0]] = area_cube.data.sum()
return areas_dict
def create_mask(land_fraction_cube, selected_region):
"""Create a mask."""
regions = ['ocean', 'land']
assert selected_region in regions
if selected_region == 'ocean':
mask = numpy.where(land_fraction_cube.data < 50, False, True)
elif selected_region == 'land':
mask = numpy.where(land_fraction_cube.data > 50, False, True)
return mask
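# Threshold behaviour of create_mask on toy land fractions (percent): a cell
# is kept (mask value False) when it lies on the selected side of the 50%
# line, and a cell at exactly 50% is masked for both selections. The stand-in
# cube class below is hypothetical and only provides the .data attribute that
# create_mask reads.
def _demo_create_mask():
    class _FakeCube(object):
        def __init__(self, data):
            self.data = data
    fractions = _FakeCube(numpy.array([0.0, 30.0, 50.0, 80.0, 100.0]))
    ocean_mask = create_mask(fractions, 'ocean')  # [False, False, True, True, True]
    land_mask = create_mask(fractions, 'land')    # [True, True, True, False, False]
    return ocean_mask, land_mask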
def get_constraints(depth_selection, hemisphere_selection):
"""Get the constraints for loading input data."""
if depth_selection:
level_constraint = iris.Constraint(depth=depth_selection)
else:
level_constraint = iris.Constraint()
if hemisphere_selection == 'nh':
lat_subset = lambda cell: cell >= 0.0
lat_constraint = iris.Constraint(latitude=lat_subset)
elif hemisphere_selection == 'sh':
lat_subset = lambda cell: cell <= 0.0
lat_constraint = iris.Constraint(latitude=lat_subset)
else:
lat_constraint = iris.Constraint()
return level_constraint, lat_constraint
def main(inargs):
"""Run the program."""
# Read data
level_constraint, lat_constraint = get_constraints(inargs.depth, inargs.hemisphere)
cube = iris.load(inargs.infiles, gio.check_iris_var(inargs.var) & level_constraint, callback=save_history)
equalise_attributes(cube)
iris.util.unify_time_units(cube)
cube = cube.concatenate_cube()
cube = gio.check_time_units(cube)
# Get area file (if applicable)
if inargs.hemisphere:
cube, coord_names, regrid_status = grids.curvilinear_to_rectilinear(cube)
cube = cube.extract(lat_constraint)
area_cube = None
else:
area_cube = read_optional(inargs.area_file)
# Mask ocean or atmosphere (if applicable)
if inargs.sftlf_file:
sftlf_file, selected_region = inargs.sftlf_file
sftlf_cube = read_optional(sftlf_file)
mask = create_mask(sftlf_cube, selected_region)
cube.data = numpy.ma.asarray(cube.data)
cube.data.mask = mask
if area_cube:
areas_dict = area_info(area_cube.copy(), mask, selected_region)
else:
areas_dict = {}
sftlf_cube = None
# Outfile attributes
atts = set_attributes(inargs, cube, area_cube, sftlf_cube, areas_dict)
# Temporal smoothing
if inargs.smoothing:
cube = smooth_data(cube, inargs.smoothing)
# Calculate metric
area_weights = get_area_weights(cube, area_cube)
if inargs.metric == 'bulk-deviation':
metric = calc_bulk_deviation(cube, area_weights, atts)
elif inargs.metric == 'mean':
metric = calc_global_mean(cube, area_weights, atts)
elif inargs.metric == 'grid-deviation':
metric = calc_grid_deviation(cube, inargs.var, area_weights, atts)
iris.save(metric, inargs.outfile)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""
    description = 'Calculate a global metric'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("infiles", type=str, nargs='*', help="Input data files (can merge on time)")
parser.add_argument("var", type=str, help="Input variable name (i.e. the standard_name)")
parser.add_argument("metric", type=str, choices=('mean', 'bulk-deviation', 'grid-deviation'), help="Metric to calculate")
parser.add_argument("outfile", type=str, help="Output file name")
parser.add_argument("--area_file", type=str, default=None,
help="Input cell area file")
parser.add_argument("--sftlf_file", type=str, nargs=2, metavar=('FILE', 'SELECTION'), default=None,
help="Land surface fraction file used to generate mask (SELECTION = land or ocean)")
parser.add_argument("--smoothing", type=str, choices=('annual', 'annual_running_mean'), default=None,
help="Apply smoothing to data")
parser.add_argument("--depth", type=float, default=None,
help="Level selection")
parser.add_argument("--hemisphere", type=str, choices=('nh' ,'sh'), default=None,
help="Restrict data to one hemisphere")
args = parser.parse_args()
main(args)
|
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from sahara import conductor as cond
from sahara import context
from sahara.plugins import recommendations_utils as ru
from sahara.tests.unit import base as b
conductor = cond.API
class Configs(object):
def __init__(self, configs):
self.configs = configs
def to_dict(self):
return self.configs
class FakeObject(object):
def __init__(self, **kwargs):
for attr in six.iterkeys(kwargs):
setattr(self, attr, kwargs.get(attr))
class TestProvidingRecommendations(b.SaharaWithDbTestCase):
@mock.patch('sahara.utils.openstack.nova.get_flavor')
def test_get_recommended_node_configs_medium_flavor(
self, fake_flavor):
ng = FakeObject(flavor_id="fake_flavor", node_configs=Configs({}))
cl = FakeObject(cluster_configs=Configs({}))
fake_flavor.return_value = FakeObject(ram=4096, vcpus=2)
observed = ru.HadoopAutoConfigsProvider(
{}, [], cl, False)._get_recommended_node_configs(ng)
self.assertEqual({
'mapreduce.reduce.memory.mb': 768,
'mapreduce.map.java.opts': '-Xmx307m',
'mapreduce.map.memory.mb': 384,
'mapreduce.reduce.java.opts': '-Xmx614m',
'yarn.app.mapreduce.am.resource.mb': 384,
'yarn.app.mapreduce.am.command-opts': '-Xmx307m',
'mapreduce.task.io.sort.mb': 153,
'yarn.nodemanager.resource.memory-mb': 3072,
'yarn.scheduler.minimum-allocation-mb': 384,
'yarn.scheduler.maximum-allocation-mb': 3072,
'yarn.nodemanager.vmem-check-enabled': 'false'
}, observed)
@mock.patch('sahara.utils.openstack.nova.get_flavor')
def test_get_recommended_node_configs_small_flavor(
self, fake_flavor):
ng = FakeObject(flavor_id="fake_flavor", node_configs=Configs({}))
cl = FakeObject(cluster_configs=Configs({}))
fake_flavor.return_value = FakeObject(ram=2048, vcpus=1)
observed = ru.HadoopAutoConfigsProvider(
{'node_configs': {}, 'cluster_configs': {}}, [], cl, False,
)._get_recommended_node_configs(ng)
self.assertEqual({
'mapreduce.reduce.java.opts': '-Xmx409m',
'yarn.app.mapreduce.am.resource.mb': 256,
'mapreduce.reduce.memory.mb': 512,
'mapreduce.map.java.opts': '-Xmx204m',
'yarn.app.mapreduce.am.command-opts': '-Xmx204m',
'mapreduce.task.io.sort.mb': 102,
'mapreduce.map.memory.mb': 256,
'yarn.nodemanager.resource.memory-mb': 2048,
'yarn.scheduler.minimum-allocation-mb': 256,
'yarn.nodemanager.vmem-check-enabled': 'false',
'yarn.scheduler.maximum-allocation-mb': 2048,
}, observed)
def test_merge_configs(self):
provider = ru.HadoopAutoConfigsProvider({}, None, None, False)
initial_configs = {
'cat': {
'talk': 'meow',
},
'bond': {
'name': 'james'
}
}
extra_configs = {
'dog': {
'talk': 'woof'
},
'bond': {
'extra_name': 'james bond'
}
}
expected = {
'cat': {
'talk': 'meow',
},
'dog': {
'talk': 'woof'
},
'bond': {
'name': 'james',
'extra_name': 'james bond'
}
}
self.assertEqual(
expected, provider._merge_configs(initial_configs, extra_configs))
@mock.patch('sahara.utils.openstack.nova.get_flavor')
@mock.patch('sahara.plugins.recommendations_utils.conductor.'
'node_group_update')
@mock.patch('sahara.plugins.recommendations_utils.conductor.'
'cluster_update')
def test_apply_recommended_configs(self, cond_cluster, cond_node_group,
fake_flavor):
class TestProvider(ru.HadoopAutoConfigsProvider):
def get_datanode_name(self):
return "dog_datanode"
fake_flavor.return_value = FakeObject(ram=2048, vcpus=1)
to_tune = {
'cluster_configs': {
'dfs.replication': ('dfs', 'replica')
},
'node_configs': {
'mapreduce.task.io.sort.mb': ('bond', 'extra_name')
}
}
fake_plugin_configs = [
FakeObject(applicable_target='dfs', name='replica',
default_value=3)]
fake_ng = FakeObject(
use_autoconfig=True,
count=2,
node_processes=['dog_datanode'],
flavor_id='fake_id',
node_configs=Configs({
'bond': {
'name': 'james'
}
})
)
fake_cluster = FakeObject(
cluster_configs=Configs({
'cat': {
'talk': 'meow',
}
}),
node_groups=[fake_ng],
use_autoconfig=True,
extra=Configs({})
)
v = TestProvider(
to_tune, fake_plugin_configs, fake_cluster, False)
v.apply_recommended_configs()
self.assertEqual([mock.call(context.ctx(), fake_cluster, {
'cluster_configs': {
'cat': {
'talk': 'meow'
},
'dfs': {
'replica': 2
}
}
}), mock.call(
context.ctx(), fake_cluster,
{'extra': {'auto-configured': True}})],
cond_cluster.call_args_list)
self.assertEqual([mock.call(context.ctx(), fake_ng, {
'node_configs': {
'bond': {
'name': 'james',
'extra_name': 102
}
}
})], cond_node_group.call_args_list)
@mock.patch('sahara.utils.openstack.nova.get_flavor')
@mock.patch('sahara.plugins.recommendations_utils.conductor.'
'node_group_update')
@mock.patch('sahara.plugins.recommendations_utils.conductor.'
'cluster_update')
def test_apply_recommended_configs_no_updates(
self, cond_cluster, cond_node_group, fake_flavor):
fake_flavor.return_value = FakeObject(ram=2048, vcpus=1)
to_tune = {
'cluster_configs': {
'dfs.replication': ('dfs', 'replica')
},
'node_configs': {
'mapreduce.task.io.sort.mb': ('bond', 'extra_name')
}
}
fake_plugin_configs = [
FakeObject(applicable_target='dfs', name='replica',
default_value=3)]
fake_ng = FakeObject(
use_autoconfig=True,
count=2,
node_processes=['dog_datanode'],
flavor_id='fake_id',
node_configs=Configs({
'bond': {
'extra_name': 'james bond'
}
})
)
fake_cluster = FakeObject(
cluster_configs=Configs({
'dfs': {
'replica': 1
}
}),
node_groups=[fake_ng],
use_autoconfig=True,
extra=Configs({})
)
v = ru.HadoopAutoConfigsProvider(
to_tune, fake_plugin_configs, fake_cluster, False)
v.apply_recommended_configs()
self.assertEqual(0, cond_node_group.call_count)
self.assertEqual(
[mock.call(context.ctx(), fake_cluster,
{'extra': {'auto-configured': True}})],
cond_cluster.call_args_list)
def test_correct_use_autoconfig_value(self):
ctx = context.ctx()
ngt1 = conductor.node_group_template_create(ctx, {
'name': 'ngt1',
'flavor_id': '1',
'plugin_name': 'vanilla',
'hadoop_version': '1'
})
ngt2 = conductor.node_group_template_create(ctx, {
'name': 'ngt2',
'flavor_id': '2',
'plugin_name': 'vanilla',
'hadoop_version': '1',
'use_autoconfig': False
})
self.assertTrue(ngt1.use_autoconfig)
self.assertFalse(ngt2.use_autoconfig)
clt = conductor.cluster_template_create(ctx, {
'name': "clt1",
'plugin_name': 'vanilla',
'hadoop_version': '1',
'node_groups': [
{
'count': 3,
"node_group_template_id": ngt1.id
},
{
'count': 1,
'node_group_template_id': ngt2.id
}
],
'use_autoconfig': False
})
cluster = conductor.cluster_create(ctx, {
'name': 'stupid',
'cluster_template_id': clt.id
})
self.assertFalse(cluster.use_autoconfig)
for ng in cluster.node_groups:
if ng.name == 'ngt1':
self.assertTrue(ng.use_autoconfig)
else:
self.assertFalse(ng.use_autoconfig)
@mock.patch('sahara.plugins.recommendations_utils.conductor.'
'cluster_update')
    def test_not_autoconfigured(self, cluster_update):
fake_cluster = FakeObject(extra=Configs({}))
v = ru.HadoopAutoConfigsProvider({}, [], fake_cluster, True)
v.apply_recommended_configs()
self.assertEqual(0, cluster_update.call_count)
|
|
# moose_parse.py
# This parser is basically a somewhat stripped-down version of the
# public domain Yeanpypa parser by Markus Brueckner. Very many thanks
# to Markus for doing Yeanpypa.
# The main differences: Some of the leading underscores have been
# removed from variable names. Some code comments have been removed.
# The exception-handling code has been removed. This is just my own
# preference - I like to be hands-on with the parser code, without
# exceptions code getting in the way and cluttering things up.
# This code is released to the public domain.
# "Share and enjoy..." ;)
import logging
class InputReader(object):
def __init__(self, string, ignore_white):
self.current_pos = 0
self.stack = []
self.string = string
self.length = len(self.string)
self.ignore_white = ignore_white
def getPos(self):
return self.current_pos
def skipWhite(self):
while (self.current_pos < (self.length) and self.string[self.current_pos].isspace()):
self.current_pos += 1
    def getString(self, length):
        if self.ignore_white:
            self.skipWhite()
        if self.current_pos + length > self.length:
            raise EndOfStringException()
        # Start of the string is at the current position
        start = self.current_pos
        self.current_pos += length
        return self.string[start:self.current_pos]
def getChar(self):
        if self.ignore_white:
            self.skipWhite()
        if self.current_pos >= len(self.string):
            raise EndOfStringException()
        logging.debug("Getting char at position %d" % self.current_pos)
# We return the char at the current position
char = self.string[self.current_pos]
# Advance the pointer by one
self.current_pos += 1
return char
def checkPoint(self):
self.stack.append(self.current_pos)
def rollback(self):
if len(self.stack) == 0:
raise EmptyStackException()
self.current_pos = self.stack[-1]
self.stack = self.stack[:-1]
def deleteCheckpoint(self):
if len(self.stack) == 0:
pass
#raise EmptyStackException()
self.stack = self.stack[:-1]
def fullyConsumed(self):
return len(self.string) == self.current_pos
def getIgnoreState(self):
return self.ignore_white
def setIgnoreState(self, state):
self.ignore_white = state
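# A small sketch of InputReader's checkpoint/rollback mechanism (the sample
# input is arbitrary): checkPoint() remembers the read position, rollback()
# restores it, and deleteCheckpoint() discards it once a rule has matched.
def _demo_checkpointing():
    reader = InputReader("abc", ignore_white=False)
    reader.checkPoint()
    assert reader.getChar() == 'a'
    reader.rollback()                  # back to position 0
    assert reader.getChar() == 'a'     # 'a' can be read again
    reader.checkPoint()
    reader.getChar()
    reader.deleteCheckpoint()          # keep the consumed position
    assert reader.getChar() == 'c'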
class ParseException(Exception):
    pass
class EndOfStringException(Exception):
    pass
class EmptyStackException(Exception):
    pass
class ParseResult(object):
def __init__(self, input_reader, token):
self.input_reader = input_reader
self.token = token
def full(self):
return self.input_reader.fullyConsumed()
def getTokens(self):
return self.token
class Rule(object):
action = None
hide_token = False
    def match(self, input_reader):
pass
def __add__(self, second_rule):
return AndRule(self, second_rule)
def __or__(self, second_rule):
return OrRule(self, second_rule)
def setaction(self, action):
self.action = action
return self
def callAction(self, param):
if self.action:
if isinstance(param, list):
return self.action(param)
else:
return self.action([param])
else:
return param
def hide(self):
self.hide_token = True
return self
def returnToken(self, token):
if self.hide_token:
return None
else:
return token
class Literal(Rule):
def __init__(self, string):
self.string = string
def __str__(self):
return "\"%s\"" % self.string
def match(self, input_reader):
# Save the position of the pointer
input_reader.checkPoint()
try:
            string = input_reader.getString(len(self.string))
# The input does not match our string
if string != self.string:
# Roll back the parse
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at position %d. Got '%s'" % (self.string, input_reader.getPos(), string))
except EndOfStringException:
# End of string reached without a match
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at end of string" % self.string)
# We have a successful match, so delete the checkpoint.
input_reader.deleteCheckpoint()
logging.debug("Matched \"%s\"" % self)
# Return the string and call its action (if it has one).
return self.returnToken(self.callAction([self.string]))
class AnyOf(Rule):
"""
A class to match chars from a charset. The class matches exactly one of the chars from
the given charset. Whitespaces are matched depending on the setting of the input reader.
Note that if the input reader is set to ignore whitespaces, they will not be matched even
if the charset contains a whitespace character.
"""
def __init__(self, set):
"""
Initialize the object with a given set.
@param set: the charset this rule should match
@type set: str
"""
self.set = set
def __str__(self):
"""
Return a human readable representation of the rule.
@return: A string describing the rule
"""
return "AnyOf(%s)" % self.set
def match(self, input_reader):
"""
Match a character from the input. Depending on the setting of the input reader, the next
character ist matched directly or the next non-whitespace character is matched.
@param input_reader: The input to read from.
@type input_reader: InputReader
@return: The matched character
"""
input_reader.checkPoint()
char = ''
try:
char = input_reader.getChar()
if not (char in self.set):
input_reader.rollback()
raise ParseException("Expected char from: [%s] at %d" % (self.set, input_reader.getPos()))
except EndOfStringException:
input_reader.rollback()
raise ParseException("Expected char from: [%s] at %d" % (self.set, input_reader.getPos()))
input_reader.deleteCheckpoint()
logging.debug("Matched %s" % char)
return self.returnToken(self.callAction([char]))
class NoneOf(Rule):
"""
Match if the next character is NOT in the given set.
"""
def __init__(self, set):
"""
Initialize the rule with the given set.
@param set: The char set the rule should NOT match on.
@type set: str
"""
self.set = set
def __str__(self):
"""
Return a human readable representation of the rule.
@return A string describing the rule
"""
return "NoneOf(%s)" % self.set
def match(self, input_reader):
"""
Match the rule against the input.
@param input_reader: The input reader to read the next character from.
@type input_reader: InputReader
@return: The matched char not in the set.
"""
input_reader.checkPoint()
char = ''
try:
char = input_reader.getChar()
if char in self.set:
input_reader.rollback()
raise ParseException("Expected char not from: [%s] at %d" % (self.set, input_reader.getPos()))
except EndOfStringException:
input_reader.rollback()
raise ParseException("Expected char not from: [%s] at %d" % (self.set, input_reader.getPos()))
input_reader.deleteCheckpoint()
logging.debug("Matched %s" % char)
return self.returnToken(self.callAction([char]))
class AndRule(Rule):
def __init__(self, left_rule, right_rule):
self.subrules = [left_rule, right_rule]
def __str__(self):
return "(%s)" % ' '.join(map(str, self.subrules))
def __add__(self, right_rule):
self.subrules.append(right_rule)
return self
def match(self, input_reader):
retval = []
try:
input_reader.checkPoint()
for rule in self.subrules:
result = rule.match(input_reader)
if result != None:
retval.append(result)
input_reader.deleteCheckpoint()
except ParseException:
input_reader.rollback()
#raise
return self.returnToken(self.callAction(retval))
class OrRule(Rule):
def __init__(self, left_rule, right_rule):
self.subrules = [left_rule, right_rule]
def __str__(self):
return "(%s)" % ' | '.join(map(str, self.subrules))
def __or__(self, right_rule):
self.subrules.append(right_rule)
return self
    def match(self, input_reader):
        # Try each alternative in turn and return the first one that matches.
        for rule in self.subrules:
            input_reader.checkPoint()
            try:
                result = rule.match(input_reader)
            except ParseException:
                input_reader.rollback()
                continue
            input_reader.deleteCheckpoint()
            return self.returnToken(self.callAction(result))
        raise ParseException("No alternative matched at position %d" % input_reader.getPos())
class Optional(Rule):
#This rule matches its subrule optionally once. If the subrule does
#not match, the Optional() rule matches anyway.
def __init__(self, rule):
#Initialize the rule with a subrule.
#@param rule: The rule to match optionally
#@type rule: Rule
self.rule = rule
def __str__(self):
#Return a string representation of this rule.
#@return: a human readable representation of this rule.
return "[ %s ]" % str(self.rule)
def match(self, input_reader):
#Match this rule against the input.
#@param input_reader: The input reader to read from.
#@type input_reader: InputReader
#@return A list of token matched by the subrule (or None, if none)
try:
rule_match = self.rule.match(input_reader)
logging.debug("Matched %s" % self)
return self.returnToken(self.callAction(rule_match))
except ParseException:
pass
class OneOrMore(Rule):
    #Match a rule once or more. This rule matches its subrule at least
    #once or as often as possible.
    def __init__(self, rule):
        #Initialize the rule with the appropriate subrule.
        #@param rule: The subrule to match.
        #@type rule: Rule
        self.rule = rule
    def __str__(self):
        #Return a human-readable representation of the rule.
        #@return: A string describing this rule.
        return "{ %s }+" % str(self.rule)
    def match(self, input_reader):
        # The subrule has to match at least once.
        retval = [self.rule.match(input_reader)]
        # Keep matching until the subrule fails or stops consuming input.
        while True:
            input_reader.checkPoint()
            start = input_reader.getPos()
            try:
                result = self.rule.match(input_reader)
            except (ParseException, EndOfStringException):
                input_reader.rollback()
                break
            if input_reader.getPos() == start:
                # No progress was made; stop to avoid looping forever.
                input_reader.rollback()
                break
            input_reader.deleteCheckpoint()
            if result != None:
                retval.append(result)
        logging.debug("Matched \"%s\"" % self)
        return self.returnToken(self.callAction(retval))
class Combine(Rule):
"""
Pseudo rule that recursivly combines all of it's children into one token.
This rule is useful if the token of a group of subrules should be combined
to form one string.
"""
def __init__(self, rule):
"""
Initialize the rule with a subrule. The token generated by
the subrule are recursivly combined into one string.
@param rule: The subrule to combine.
@type rule: Rule
"""
self.rule = rule
def __str__(self):
"""
Return a human-readable description of the rule.
@return: A string describing this rule.
"""
return "Combine(%s)" % str(self.rule)
def combine(self, token):
"""
Recursivly combine all token into a single one. This is an internal helper that
recursivly combines a list of lists (or strings) into one string.
@param token: the token list to combine into one string.
@type token: list or str
"""
if token==None:
return None
#retval = ''
retval=[]
for tok in token:
retval.append(tok)
return retval
def match(self, input_reader):
"""
Match this rule against the input. The rule matches the input
against its subrule and combines the resulting token into a
string.
@param input_reader: The input reader to read from.
@type input_reader: InputReader
@return: A string combining all the token generated by the subrule.
"""
retval = self.combine(self.rule.match(input_reader))
return self.returnToken(self.callAction(retval))
def Word(param):
"""
a shortcut for Combine(MatchWhite(OneOrMore(AnyOf(string)))) or
Combine(MatchWhite(OneOrMore(param))) (depending on the type of
param). See there for further details.
"""
if isinstance(param, str):
return Combine(MatchWhite(OneOrMore(AnyOf(param))))
else:
return Combine(MatchWhite(OneOrMore(param)))
class ZeroOrMore(Rule):
"""
Match a rule ad infinitum. This rule is similar to the Optional()
rule. While this one only matches if the subrule matches 0 or 1
times, the ZeroOrMore rule matches at any time. This rule tries to
consume as much input as possible.
"""
def __init__(self, rule):
"""
Initialize this rule with a subrule. The subrule is
transformed to a Optional(OneOrMore(rule)) construct.
@param rule: The subrule to match.
@type rule: Rule
"""
self.rule = Optional(OneOrMore(rule))
def __str__(self):
"""
Return a human readable representation of the rule.
@return A description of this rule.
"""
return "{ %s }" % str(self.rule)
def match(self, input_reader):
"""
Match the input against the subrule.
@param input_reader: The input reader to read from.
@type input_reader: InputReader
@return: A list of token generated by the matching of the subrule.
"""
retval = self.rule.match(input_reader)
return self.returnToken(self.callAction(retval))
class IgnoreWhite(Rule):
"""
A pseudo-rule to tell the parser to temporary ignore
whitespaces. This rule itself does not match anything. It merely
sets the input reader into 'ignore whitespace' mode and returns
the token produced by its subrule. After executing the subrule,
the ignore state of the input reader is reset (i.e. if it was
'ignore' before, it will be afterwards, if it was 'match', it will
be that).
"""
def __init__(self, rule):
"""
Initialize the rule with a subrule.
@param rule: The subrule to match.
@type rule: Rule
"""
self.rule = rule
def __str__(self):
"""
Return a human-readable representation of this rule.
@return: A string describing this rule.
"""
return "IgnoreWhite(%s)" % str(self.rule)
def match(self, input_reader):
"""
Match the input against this rule. The input reader is set to
'ignore whitespace' mode, the subrule is matched, the ignore
state of the input reader is reset and the result of the
subrule is returned.
@param input_reader: The input reader to read any input from.
@type input_reader: InputReader
@return: The results of the subrule.
"""
ignore = input_reader.getIgnoreState()
input_reader.setIgnoreState(True)
try:
result = self.rule.match(input_reader)
except:
input_reader.setIgnoreState(ignore)
raise
input_reader.setIgnoreState(ignore)
return self.returnToken(self.callAction(result))
class MatchWhite(Rule):
"""
A pseudo-rule to tell the parser to temporary match
whitespaces. This rule is the counterpart of the IgnoreWhite
rule. It sets the input reader into 'match whitespace' mode and
matches the given subrule.
"""
def __init__(self, rule):
"""
Initialize this rule with a subrule.
@param rule: The rule to match as a subrule.
@type rule: Rule
"""
self.rule = rule
def __str__(self):
"""
Return a human-readable description of the rule.
@return: A human-readable description of this rule.
"""
return "MatchWhite(%s)" % str(self.rule)
def match(self, input_reader):
"""
Match this rule against the input. The rule sets the input
reader into 'match whitespace' mode, matches the subrule,
resets the ignore state and returns the results of the
subrule.
@param input_reader: The input reader to read input from.
@type input_reader: InputReader
@return: A list of token generated by the subrule.
"""
ignore = input_reader.getIgnoreState()
input_reader.setIgnoreState(False)
# skip the trailing whitespace before the subrule matches.
input_reader.skipWhite()
try:
result = self.rule.match(input_reader)
except:
input_reader.setIgnoreState(ignore)
raise
input_reader.setIgnoreState(ignore)
return self.returnToken(self.callAction(result))
class Alpha(Rule):
#Match a string containing only letters.
def __init__(self, string):
self.string = string
def __str__(self):
#Return a human-readable description of the rule.
#@return: A human-readable description of this rule.
return "Alpha(%s)" % str(self.string)
def match(self, input_reader):
#Match the input
#@param input_reader: The input reader to read input from.
#@type input_reader: InputReader
#@return: The matched character, if any.
input_reader.checkPoint()
try:
string = input_reader.getString(len(self.string))
# The input does not match our string
            if not string.isalpha():
# Roll back the parse
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at position %d. Got '%s'" % (self.string, input_reader.getPos(), string))
except EndOfStringException:
# End of string reached without a match
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at end of string" % self.string)
# We have a successful match, so delete the checkpoint.
input_reader.deleteCheckpoint()
logging.debug("Matched \"%s\"" % self)
# Return the string and call its action (if it has one).
return self.returnToken(self.callAction([string]))
class Digit(Rule):
#Match a string containing only digits.
def __init__(self, string):
self.string = str(string)
def __str__(self):
#Return a human-readable description of the rule.
#@return: A human-readable description of this rule.
return "Digit(%s)" % str(self.rule)
def match(self, input_reader):
#Match the input
#@param input_reader: The input reader to read input from.
#@type input_reader: InputReader
#@return: The matched character, if any.
input_reader.checkPoint()
try:
string = input_reader.getString(len(self.string))
# The input does not match our string
            if not string.isdigit():
# Roll back the parse
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at position %d. Got '%s'" % (self.string, input_reader.getPos(), string))
except EndOfStringException:
# End of string reached without a match
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at end of string" % self.string)
# We have a successful match, so delete the checkpoint.
input_reader.deleteCheckpoint()
logging.debug("Matched \"%s\"" % self)
# Return the string and call its action (if it has one).
return self.returnToken(self.callAction([string]))
class Alnum(Rule):
#Match a string containing letters and digits.
def __init__(self, string):
self.string = string
def __str__(self):
#Return a human-readable description of the rule.
#@return: A human-readable description of this rule.
return "Alnum(%s)" % str(self.string)
def match(self, input_reader):
#Match the input
#@param input_reader: The input reader to read input from.
#@type input_reader: InputReader
#@return: The matched character, if any.
input_reader.checkPoint()
try:
string = input_reader.getString(len(self.string))
# The input does not match our string
            if not string.isalnum():
# Roll back the parse
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at position %d. Got '%s'" % (self.string, input_reader.getPos(), string))
except EndOfStringException:
# End of string reached without a match
input_reader.rollback()
#raise ParseException()
#raise ParseException("Expected '%s' at end of string" % self.string)
# We have a successful match, so delete the checkpoint.
input_reader.deleteCheckpoint()
logging.debug("Matched \"%s\"" % self)
# Return the string and call its action (if it has one).
return self.returnToken(self.callAction([string]))
# Useful parsers
integer = Word('0123456789')
letters = Word('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
hexdigit = AnyOf('0123456789abcdefABCDEF')
def parse(parser, string, ignore_white=True):
input_reader = InputReader(string, ignore_white)
tokens = parser.match(input_reader)
return ParseResult(input_reader, tokens)
# A function to parse input
def parseit(grammar_name, input):
result = parse(grammar_name, input)
if result.full():
print "Success!"
else:
print "Fail"
|
|
import os
import redis
from wsgiref.simple_server import WSGIRequestHandler
from pymongo import MongoClient
import mysql.connector
import oauth2.store.mongodb
import oauth2.store.memory
import oauth2.store.dbapi.mysql
import oauth2.store.redisdb
class NoLoggingHandler(WSGIRequestHandler):
"""
Turn off logging access to STDERR in the standard WSGI request handler.
"""
def log_message(self, format, *args):
pass
def store_factory(client_identifier, client_secret, redirect_uris):
stores = {"access_token_store": None, "auth_code_store": None,
"client_store": None}
database = os.environ.get("DB")
if database == "mongodb":
creator_class = MongoDbStoreCreator
elif database == "mysql":
creator_class = MysqlStoreCreator
elif database == "redis-server":
creator_class = RedisStoreCreator
else:
creator_class = MemoryStoreCreator
creator = creator_class(client_identifier, client_secret, redirect_uris)
creator.initialize()
creator.before_create()
stores["access_token_store"] = creator.create_access_token_store()
stores["auth_code_store"] = creator.create_auth_code_store()
stores["client_store"] = creator.create_client_store()
creator.after_create()
return stores
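# A minimal usage sketch of store_factory (the client credentials and redirect
# URI below are placeholders): the DB environment variable selects the
# backend, and an unset or unknown value falls back to the in-memory stores.
def _demo_store_factory():
    os.environ.pop("DB", None)  # force the in-memory fallback
    stores = store_factory(client_identifier="abc",
                           client_secret="xyz",
                           redirect_uris=["http://127.0.0.1:15487/callback"])
    return stores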
class StoreCreator(object):
def __init__(self, client_identifier, client_secret, redirect_uris):
self.client_identifier = client_identifier
self.client_secret = client_secret
self.redirect_uris = redirect_uris
def initialize(self):
pass
def after_create(self):
pass
def before_create(self):
pass
def create_access_token_store(self):
raise NotImplementedError
def create_auth_code_store(self):
raise NotImplementedError
def create_client_store(self):
raise NotImplementedError
class MemoryStoreCreator(StoreCreator):
def initialize(self):
self.client_store = oauth2.store.memory.ClientStore()
self.token_store = oauth2.store.memory.TokenStore()
def create_access_token_store(self):
return self.token_store
def create_auth_code_store(self):
return self.token_store
def create_client_store(self):
return self.client_store
def after_create(self):
self.client_store.add_client(client_id=self.client_identifier,
client_secret=self.client_secret,
redirect_uris=self.redirect_uris)
class MongoDbStoreCreator(StoreCreator):
def initialize(self):
client = MongoClient('127.0.0.1', 27017)
self.db = client.test_database
def create_access_token_store(self):
return oauth2.store.mongodb.AccessTokenStore(
collection=self.db["access_tokens"]
)
def create_auth_code_store(self):
return oauth2.store.mongodb.AuthCodeStore(
collection=self.db["auth_codes"]
)
def create_client_store(self):
return oauth2.store.mongodb.ClientStore(collection=self.db["clients"])
def after_create(self):
self.db["clients"].insert({
"identifier": "abc",
"secret": "xyz",
"redirect_uris": ["http://127.0.0.1:15487/callback"]
})
class MysqlStoreCreator(StoreCreator):
create_tables = """
DROP TABLE IF EXISTS `access_tokens`;
CREATE TABLE IF NOT EXISTS `access_tokens` (
`id` INT NOT NULL AUTO_INCREMENT COMMENT 'Unique identifier',
`client_id` VARCHAR(32) NOT NULL COMMENT 'The identifier of a client. Assuming it is an arbitrary text which is a maximum of 32 characters long.',
`grant_type` ENUM('authorization_code', 'implicit', 'password', 'client_credentials', 'refresh_token') NOT NULL COMMENT 'The type of a grant for which a token has been issued.',
`token` CHAR(36) NOT NULL COMMENT 'The access token.',
`expires_at` TIMESTAMP NULL COMMENT 'The timestamp at which the token expires.',
`refresh_token` CHAR(36) NULL COMMENT 'The refresh token.',
`refresh_expires_at` TIMESTAMP NULL COMMENT 'The timestamp at which the refresh token expires.',
`user_id` INT NULL COMMENT 'The identifier of the user this token belongs to.',
PRIMARY KEY (`id`),
INDEX `fetch_by_refresh_token` (`refresh_token` ASC),
INDEX `fetch_existing_token_of_user` (`client_id` ASC, `grant_type` ASC, `user_id` ASC))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `access_token_scopes`;
CREATE TABLE IF NOT EXISTS `access_token_scopes` (
`id` INT NOT NULL AUTO_INCREMENT,
`name` VARCHAR(32) NOT NULL COMMENT 'The name of scope.',
`access_token_id` INT NOT NULL COMMENT 'The unique identifier of the access token this scope belongs to.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `access_token_data`;
CREATE TABLE IF NOT EXISTS `access_token_data` (
`id` INT NOT NULL AUTO_INCREMENT,
`key` VARCHAR(32) NOT NULL COMMENT 'The key of an entry converted to the key in a Python dict.',
`value` VARCHAR(32) NOT NULL COMMENT 'The value of an entry converted to the value in a Python dict.',
`access_token_id` INT NOT NULL COMMENT 'The unique identifier of the access token a row belongs to.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `auth_codes`;
CREATE TABLE IF NOT EXISTS `auth_codes` (
`id` INT NOT NULL AUTO_INCREMENT,
`client_id` VARCHAR(32) NOT NULL COMMENT 'The identifier of a client. Assuming it is an arbitrary text which is a maximum of 32 characters long.',
`code` CHAR(36) NOT NULL COMMENT 'The authorisation code.',
`expires_at` TIMESTAMP NOT NULL COMMENT 'The timestamp at which the token expires.',
`redirect_uri` VARCHAR(128) NULL COMMENT 'The redirect URI send by the client during the request of an authorisation code.',
`user_id` INT NULL COMMENT 'The identifier of the user this authorisation code belongs to.',
PRIMARY KEY (`id`),
INDEX `fetch_code` (`code` ASC))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `auth_code_data`;
CREATE TABLE IF NOT EXISTS `auth_code_data` (
`id` INT NOT NULL AUTO_INCREMENT,
`key` VARCHAR(32) NOT NULL COMMENT 'The key of an entry converted to the key in a Python dict.',
`value` VARCHAR(32) NOT NULL COMMENT 'The value of an entry converted to the value in a Python dict.',
`auth_code_id` INT NOT NULL COMMENT 'The identifier of the authorisation code that this row belongs to.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `auth_code_scopes`;
CREATE TABLE IF NOT EXISTS `auth_code_scopes` (
`id` INT NOT NULL AUTO_INCREMENT,
`name` VARCHAR(32) NOT NULL,
`auth_code_id` INT NOT NULL,
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `clients`;
CREATE TABLE IF NOT EXISTS `clients` (
`id` INT NOT NULL AUTO_INCREMENT,
`identifier` VARCHAR(32) NOT NULL COMMENT 'The identifier of a client.',
`secret` VARCHAR(32) NOT NULL COMMENT 'The secret of a client.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `client_grants`;
CREATE TABLE IF NOT EXISTS `client_grants` (
`id` INT NOT NULL AUTO_INCREMENT,
`name` VARCHAR(32) NOT NULL,
`client_id` INT NOT NULL COMMENT 'The id of the client a row belongs to.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `client_redirect_uris`;
CREATE TABLE IF NOT EXISTS `client_redirect_uris` (
`id` INT NOT NULL AUTO_INCREMENT,
`redirect_uri` VARCHAR(128) NOT NULL COMMENT 'A URI of a client.',
`client_id` INT NOT NULL COMMENT 'The id of the client a row belongs to.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;
DROP TABLE IF EXISTS `client_response_types`;
CREATE TABLE IF NOT EXISTS `client_response_types` (
`id` INT NOT NULL AUTO_INCREMENT,
`response_type` VARCHAR(32) NOT NULL COMMENT 'The response type that a client can use.',
`client_id` INT NOT NULL COMMENT 'The id of the client a row belongs to.',
PRIMARY KEY (`id`))
ENGINE = InnoDB;"""
def initialize(self):
self.connection = mysql.connector.connect(host="127.0.0.1",
user="root", passwd="",
db="testdb")
def create_access_token_store(self):
return oauth2.store.dbapi.mysql.\
MysqlAccessTokenStore(connection=self.connection)
def create_auth_code_store(self):
return oauth2.store.dbapi.mysql.\
MysqlAuthCodeStore(connection=self.connection)
def create_client_store(self):
return oauth2.store.dbapi.mysql.\
MysqlClientStore(connection=self.connection)
def before_create(self):
        # Execute each statement on its own instead of as one big query:
        # running everything in a single execute() intermittently left some
        # tables uncreated between test runs.
        for stmt in self.create_tables.split(';'):
            if not stmt.strip():
                # Skip the empty fragment after the trailing semicolon.
                continue
            cursor = self.connection.cursor()
            try:
                cursor.execute(stmt)
                self.connection.commit()
            finally:
                cursor.close()
def after_create(self):
cursor = self.connection.cursor()
try:
cursor.execute(
"INSERT INTO clients (identifier, secret) VALUES (%s, %s)",
("abc", "xyz"))
client_row_id = cursor.lastrowid
self.connection.commit()
finally:
cursor.close()
cursor = self.connection.cursor()
try:
cursor.execute(
"""INSERT INTO client_redirect_uris
(redirect_uri, client_id)
VALUES (%s, %s)""",
("http://127.0.0.1:15487/callback", client_row_id)
)
self.connection.commit()
finally:
cursor.close()
class RedisStoreCreator(StoreCreator):
def initialize(self):
self.r = redis.StrictRedis(host="localhost", port=6379, db=0)
self.client_store = oauth2.store.redisdb.ClientStore(rs=self.r)
self.token_store = oauth2.store.redisdb.TokenStore(rs=self.r)
def create_access_token_store(self):
return self.token_store
def create_auth_code_store(self):
return self.token_store
def create_client_store(self):
return self.client_store
def after_create(self):
self.client_store.add_client(client_id=self.client_identifier,
client_secret=self.client_secret,
redirect_uris=self.redirect_uris)
|
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases() # NOQA
from chainer import optimizers
from chainer import testing
import gym
import gym.spaces
import basetest_agents as base
from chainerrl import agents
from chainerrl import explorers
from chainerrl import policies
from chainerrl import q_functions
from chainerrl import replay_buffer
from chainerrl import v_function
def create_stochastic_policy_for_env(env):
assert isinstance(env.observation_space, gym.spaces.Box)
ndim_obs = env.observation_space.low.size
if isinstance(env.action_space, gym.spaces.Discrete):
return policies.FCSoftmaxPolicy(ndim_obs, env.action_space.n)
elif isinstance(env.action_space, gym.spaces.Box):
return policies.FCGaussianPolicy(
ndim_obs, env.action_space.low.size,
bound_mean=False)
else:
raise NotImplementedError()
def create_deterministic_policy_for_env(env):
assert isinstance(env.observation_space, gym.spaces.Box)
assert isinstance(env.action_space, gym.spaces.Box)
ndim_obs = env.observation_space.low.size
return policies.FCDeterministicPolicy(
n_input_channels=ndim_obs,
action_size=env.action_space.low.size,
n_hidden_channels=200,
n_hidden_layers=2,
bound_action=False)
def create_state_q_function_for_env(env):
assert isinstance(env.observation_space, gym.spaces.Box)
ndim_obs = env.observation_space.low.size
if isinstance(env.action_space, gym.spaces.Discrete):
return q_functions.FCStateQFunctionWithDiscreteAction(
ndim_obs=ndim_obs,
n_actions=env.action_space.n,
n_hidden_channels=200,
n_hidden_layers=2)
elif isinstance(env.action_space, gym.spaces.Box):
return q_functions.FCQuadraticStateQFunction(
n_input_channels=ndim_obs,
n_dim_action=env.action_space.low.size,
n_hidden_channels=200,
n_hidden_layers=2,
action_space=env.action_space)
else:
raise NotImplementedError()
def create_state_action_q_function_for_env(env):
assert isinstance(env.observation_space, gym.spaces.Box)
assert isinstance(env.action_space, gym.spaces.Box)
ndim_obs = env.observation_space.low.size
return q_functions.FCSAQFunction(
n_dim_obs=ndim_obs,
n_dim_action=env.action_space.low.size,
n_hidden_channels=200,
n_hidden_layers=2)
def create_v_function_for_env(env):
assert isinstance(env.observation_space, gym.spaces.Box)
ndim_obs = env.observation_space.low.size
return v_function.FCVFunction(ndim_obs)
@testing.parameterize(*testing.product({
'discrete': [True, False],
'partially_observable': [False],
'episodic': [False],
}))
class TestA3C(base._TestAgentInterface):
def create_agent(self, env):
model = agents.a3c.A3CSeparateModel(
pi=create_stochastic_policy_for_env(env),
v=create_v_function_for_env(env))
opt = optimizers.Adam()
opt.setup(model)
return agents.A3C(model, opt, t_max=1, gamma=0.99)
@testing.parameterize(*testing.product({
'discrete': [True],
'partially_observable': [False],
'episodic': [False],
}))
class TestACER(base._TestAgentInterface):
def create_agent(self, env):
model = agents.acer.ACERSeparateModel(
pi=create_stochastic_policy_for_env(env),
q=create_state_q_function_for_env(env))
opt = optimizers.Adam()
opt.setup(model)
rbuf = replay_buffer.EpisodicReplayBuffer(10 ** 4)
return agents.ACER(model, opt, t_max=1, gamma=0.99,
replay_buffer=rbuf)
@testing.parameterize(*testing.product({
'discrete': [True, False],
'partially_observable': [False],
'episodic': [False],
}))
class TestDQN(base._TestAgentInterface):
def create_agent(self, env):
model = create_state_q_function_for_env(env)
rbuf = replay_buffer.ReplayBuffer(10 ** 5)
opt = optimizers.Adam()
opt.setup(model)
explorer = explorers.ConstantEpsilonGreedy(
0.2, random_action_func=lambda: env.action_space.sample())
return agents.DQN(model, opt, rbuf, gamma=0.99, explorer=explorer)
@testing.parameterize(*testing.product({
'discrete': [True, False],
'partially_observable': [False],
'episodic': [False],
}))
class TestDoubleDQN(base._TestAgentInterface):
def create_agent(self, env):
model = create_state_q_function_for_env(env)
rbuf = replay_buffer.ReplayBuffer(10 ** 5)
opt = optimizers.Adam()
opt.setup(model)
explorer = explorers.ConstantEpsilonGreedy(
0.2, random_action_func=lambda: env.action_space.sample())
return agents.DoubleDQN(
model, opt, rbuf, gamma=0.99, explorer=explorer)
@testing.parameterize(*testing.product({
'discrete': [True, False],
'partially_observable': [False],
'episodic': [False],
}))
class TestNSQ(base._TestAgentInterface):
def create_agent(self, env):
model = create_state_q_function_for_env(env)
opt = optimizers.Adam()
opt.setup(model)
explorer = explorers.ConstantEpsilonGreedy(
0.2, random_action_func=lambda: env.action_space.sample())
return agents.NSQ(
q_function=model,
optimizer=opt,
t_max=1,
gamma=0.99,
i_target=100,
explorer=explorer)
@testing.parameterize(*testing.product({
'discrete': [False],
'partially_observable': [False],
'episodic': [False],
}))
class TestDDPG(base._TestAgentInterface):
def create_agent(self, env):
model = agents.ddpg.DDPGModel(
policy=create_deterministic_policy_for_env(env),
q_func=create_state_action_q_function_for_env(env))
rbuf = replay_buffer.ReplayBuffer(10 ** 5)
opt_a = optimizers.Adam()
opt_a.setup(model.policy)
opt_b = optimizers.Adam()
opt_b.setup(model.q_function)
explorer = explorers.AdditiveGaussian(scale=1)
return agents.DDPG(model, opt_a, opt_b, rbuf, gamma=0.99,
explorer=explorer)
@testing.parameterize(*testing.product({
'discrete': [False],
'partially_observable': [False],
'episodic': [False],
}))
class TestPGT(base._TestAgentInterface):
def create_agent(self, env):
model = agents.ddpg.DDPGModel(
policy=create_stochastic_policy_for_env(env),
q_func=create_state_action_q_function_for_env(env))
rbuf = replay_buffer.ReplayBuffer(10 ** 5)
opt_a = optimizers.Adam()
opt_a.setup(model.policy)
opt_b = optimizers.Adam()
opt_b.setup(model.q_function)
explorer = explorers.AdditiveGaussian(scale=1)
return agents.PGT(model, opt_a, opt_b, rbuf, gamma=0.99,
explorer=explorer)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import uuid
from typing import Any, Dict, List, Optional, Tuple
import jwt
import redis
from flask import Flask, g, request, Request, Response, session
logger = logging.getLogger(__name__)
class AsyncQueryTokenException(Exception):
pass
class AsyncQueryJobException(Exception):
pass
def build_job_metadata(
channel_id: str, job_id: str, user_id: Optional[str], **kwargs: Any
) -> Dict[str, Any]:
return {
"channel_id": channel_id,
"job_id": job_id,
"user_id": int(user_id) if user_id else None,
"status": kwargs.get("status"),
"errors": kwargs.get("errors", []),
"result_url": kwargs.get("result_url"),
}
def parse_event(event_data: Tuple[str, Dict[str, Any]]) -> Dict[str, Any]:
event_id = event_data[0]
event_payload = event_data[1]["data"]
return {"id": event_id, **json.loads(event_payload)}
def increment_id(redis_id: str) -> str:
# redis stream IDs are in this format: '1607477697866-0'
try:
        # Split on the final "-" so multi-digit sequence numbers increment correctly.
        prefix, last = redis_id.rsplit("-", 1)
        return f"{prefix}-{int(last) + 1}"
except Exception: # pylint: disable=broad-except
return redis_id
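# Illustrative behaviour with example ids:
#   increment_id("1607477697866-0")  # -> "1607477697866-1"
#   increment_id("1607477697866-3")  # -> "1607477697866-4"
#   increment_id("not a stream id")  # falls back to returning the input unchanged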
class AsyncQueryManager:
MAX_EVENT_COUNT = 100
STATUS_PENDING = "pending"
STATUS_RUNNING = "running"
STATUS_ERROR = "error"
STATUS_DONE = "done"
def __init__(self) -> None:
super().__init__()
self._redis: redis.Redis # type: ignore
self._stream_prefix: str = ""
self._stream_limit: Optional[int]
self._stream_limit_firehose: Optional[int]
self._jwt_cookie_name: str
self._jwt_cookie_secure: bool = False
self._jwt_cookie_domain: Optional[str]
self._jwt_secret: str
def init_app(self, app: Flask) -> None:
config = app.config
if (
config["CACHE_CONFIG"]["CACHE_TYPE"] == "null"
or config["DATA_CACHE_CONFIG"]["CACHE_TYPE"] == "null"
):
raise Exception(
"""
Cache backends (CACHE_CONFIG, DATA_CACHE_CONFIG) must be configured
and non-null in order to enable async queries
"""
)
if len(config["GLOBAL_ASYNC_QUERIES_JWT_SECRET"]) < 32:
raise AsyncQueryTokenException(
"Please provide a JWT secret at least 32 bytes long"
)
self._redis = redis.Redis(
**config["GLOBAL_ASYNC_QUERIES_REDIS_CONFIG"], decode_responses=True
)
self._stream_prefix = config["GLOBAL_ASYNC_QUERIES_REDIS_STREAM_PREFIX"]
self._stream_limit = config["GLOBAL_ASYNC_QUERIES_REDIS_STREAM_LIMIT"]
self._stream_limit_firehose = config[
"GLOBAL_ASYNC_QUERIES_REDIS_STREAM_LIMIT_FIREHOSE"
]
self._jwt_cookie_name = config["GLOBAL_ASYNC_QUERIES_JWT_COOKIE_NAME"]
self._jwt_cookie_secure = config["GLOBAL_ASYNC_QUERIES_JWT_COOKIE_SECURE"]
self._jwt_cookie_domain = config["GLOBAL_ASYNC_QUERIES_JWT_COOKIE_DOMAIN"]
self._jwt_secret = config["GLOBAL_ASYNC_QUERIES_JWT_SECRET"]
@app.after_request
def validate_session(response: Response) -> Response:
user_id = None
try:
user_id = g.user.get_id()
user_id = int(user_id)
except Exception: # pylint: disable=broad-except
pass
reset_token = (
not request.cookies.get(self._jwt_cookie_name)
or "async_channel_id" not in session
or "async_user_id" not in session
or user_id != session["async_user_id"]
)
if reset_token:
async_channel_id = str(uuid.uuid4())
session["async_channel_id"] = async_channel_id
session["async_user_id"] = user_id
sub = str(user_id) if user_id else None
token = self.generate_jwt({"channel": async_channel_id, "sub": sub})
response.set_cookie(
self._jwt_cookie_name,
value=token,
httponly=True,
secure=self._jwt_cookie_secure,
domain=self._jwt_cookie_domain,
)
return response
def generate_jwt(self, data: Dict[str, Any]) -> str:
encoded_jwt = jwt.encode(data, self._jwt_secret, algorithm="HS256")
return encoded_jwt.decode("utf-8")
def parse_jwt(self, token: str) -> Dict[str, Any]:
data = jwt.decode(token, self._jwt_secret, algorithms=["HS256"])
return data
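    # Illustrative round trip on a configured instance (names and values are examples only):
    #   token = async_query_manager.generate_jwt({"channel": "abc123", "sub": "42"})
    #   async_query_manager.parse_jwt(token)  # -> {"channel": "abc123", "sub": "42"}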
def parse_jwt_from_request(self, req: Request) -> Dict[str, Any]:
token = req.cookies.get(self._jwt_cookie_name)
if not token:
raise AsyncQueryTokenException("Token not preset")
try:
return self.parse_jwt(token)
except Exception as ex:
logger.warning(ex)
raise AsyncQueryTokenException("Failed to parse token") from ex
def init_job(self, channel_id: str, user_id: Optional[str]) -> Dict[str, Any]:
job_id = str(uuid.uuid4())
return build_job_metadata(
channel_id, job_id, user_id, status=self.STATUS_PENDING
)
def read_events(
self, channel: str, last_id: Optional[str]
) -> List[Optional[Dict[str, Any]]]:
stream_name = f"{self._stream_prefix}{channel}"
start_id = increment_id(last_id) if last_id else "-"
results = self._redis.xrange(stream_name, start_id, "+", self.MAX_EVENT_COUNT)
return [] if not results else list(map(parse_event, results))
def update_job(
self, job_metadata: Dict[str, Any], status: str, **kwargs: Any
) -> None:
if "channel_id" not in job_metadata:
raise AsyncQueryJobException("No channel ID specified")
if "job_id" not in job_metadata:
raise AsyncQueryJobException("No job ID specified")
updates = {"status": status, **kwargs}
event_data = {"data": json.dumps({**job_metadata, **updates})}
full_stream_name = f"{self._stream_prefix}full"
scoped_stream_name = f"{self._stream_prefix}{job_metadata['channel_id']}"
logger.debug("********** logging event data to stream %s", scoped_stream_name)
logger.debug(event_data)
self._redis.xadd(scoped_stream_name, event_data, "*", self._stream_limit)
self._redis.xadd(full_stream_name, event_data, "*", self._stream_limit_firehose)
|
|
from random import randint
from random import choice
from decimal import Decimal, getcontext
from enum import Enum
from money import Money
from tabulate import tabulate
CURRENCY = 'UKP'
MONEYPERPLAYERSTART = Money(amount='5000.00', currency='UKP')
MONEYBANKSTART = Money(amount='100000.00', currency='UKP')
MONEYZEROFUNDS = Money(amount='0.00', currency='UKP')
class GameStyle(Enum):
monopolyuk = 1
scrabble = 2
class MonopolyPieceStyle(Enum):
dog = 0
sheep = 1
car = 2
mobilephone = 3
class MonopolySquareStyle(Enum):
go = 1
jail = 2
freeparking = 3
communitychest = 4
chance = 5
tax = 6
property = 7
gotojail = 8
class MonopolyPropertySquareSide(Enum):
first = 1
second = 2
third = 3
    fourth = 4
class MonopolyPropertyStyle(Enum):
brown = 1
lightblue = 2
pink = 3
orange = 4
red = 5
yellow = 6
green = 7
darkblue = 8
transport = 9
utilities = 10
misc = 11
class MonopolyPropertySiteAcquistionStyle(Enum):
random = 0
class MonopolyPropertyDevelopmentAcquistionStyle(Enum):
random = 0
class MonopolyBoardStyle(Enum):
uk = 0
us = 1
'''
Brown (Dark Purple)
Old Kent Road/Mediterranean Avenue
Whitechapel Road/Baltic Avenue
Light Blue
The Angel Islington/Oriental Avenue
Euston Road/Vermont Avenue
Pentonville Road/Connecticut Avenue
Pink
Pall Mall/St. Charles Place
Whitehall/States Avenue
Northumberland Avenue/Virginia Avenue
Orange
Bow Street/St. James Place
Marlborough Street/Tennessee Avenue
Vine Street/New York Avenue
Red
The Strand/Kentucky Avenue
Fleet Street/Indiana Avenue
Trafalgar Square/Illinois Avenue
Yellow
Leicester Square/Atlantic Avenue
Coventry Street/Ventnor Avenue
Piccadilly/Marvin Gardens
Green
Regent Street/Pacific Avenue
Oxford Street/North Carolina Avenue
Bond Street/Pennsylvania Avenue
Dark Blue
Park Lane/Park Place
Mayfair/Boardwalk
Stations
King's Cross Station/Reading Railroad
Marylebone Station/Pennsylvania Railroad
Fenchurch St Station/B. & O. Railroad
Liverpool Street Station/Short Line
Utilities
Electric Company
Water Works
'''
class Game:
'''Represents the game being played'''
def __init__(self, gs, cnt_players, money_per_player, money_in_bank_at_start ):
if gs not in (GameStyle.monopolyuk, ):
raise NotImplementedError("Only Monopoly is supported currently")
else:
self._gameStyle = gs
if cnt_players > len(MonopolyPieceStyle):
raise NotImplementedError("Too many players for the number of available pieces")
elif cnt_players < 2:
raise NotImplementedError("Too few players for the game rules to make sense")
else:
self._player_count = cnt_players
self.lst_of_players = []
self.board = []
self.__next_player_idx = None
        self.__money_in_bank = money_in_bank_at_start
self.__initialize_monopoly_style_game( cnt_players,
MonopolyBoardStyle.uk,
money_per_player
)
def __initialize_monopoly_style_game(self, cnt_players, board_style, money_per_player):
self.board = self.__build_board(board_style)
#Create as many players as we need
#allocating a piece to each
        for i in range(cnt_players):
            self.lst_of_players.append(Player(MonopolyPieceStyle(i),
                                              money_per_player,
                                              len(self.board),
                                              MonopolyPropertySiteAcquistionStyle.random,
                                              MonopolyPropertyDevelopmentAcquistionStyle.random))
self.next_player_idx = randint(0, cnt_players - 1)
def make_freeze_ready_game_dic(self, turnid):
'''
Outputs a representation of the state
of the game in a form which lends itself
to later use elsewhere
'''
lstsquares = []
lstplayers = []
for player in self.lst_of_players:
lstplayers.append({
'player': player.piece_style.name,
                'funds': player.funds.format('en_GB', '###0.00'),
'position': player.position_on_board
})
for square in self.board:
pcstyle = "N/A"
if square.ownedby:
pcstyle = square.ownedby.piece_style.name
            lstsquares.append({'ownedby': pcstyle, 'name': square.name, 'price': square.price.format('en_GB', '###0.00')})
return {'turnid': turnid,
'boardstate': lstsquares,
'playerstate' : lstplayers}
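    #Illustrative shape of the returned dict (all values are examples only):
    #  {'turnid': 3,
    #   'boardstate': [{'ownedby': 'dog', 'name': 'Old Kent Road', 'price': '60.00'}, ...],
    #   'playerstate': [{'player': 'dog', 'funds': '4940.00', 'position': 12}, ...]}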
def board_health_check(self):
#Check Sides
for side in MonopolyPropertySquareSide:
cnt = 0
for square in self.board:
if square.square_side == side:
cnt += 1
print ("Side {0} has {1} squares".format(side.name, cnt))
print("")
#Check Square Style
        dicstylecnt = {}
        for square in self.board:
            style = square.square_style
            dicstylecnt[style] = dicstylecnt.get(style, 0) + 1
for k, v in dicstylecnt.items():
print ("Style {0} has {1} squares".format(k.name, v))
print("")
def __build_board(self, board_style):
dicProps = {}
dicProps['uk'] = []
dicProps['us'] = []
#First Side ===========================================================================================================================
dicProps['uk'].append(Square('Go', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.go, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Go', Money(amount='0.00', currency='USD'), MonopolySquareStyle.go, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Old Kent Road', Money(amount='60.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.brown, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Mediterranean Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.brown, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Community Chest', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.communitychest, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Community Chest', Money(amount='0.00', currency='USD'), MonopolySquareStyle.communitychest, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Whitechapel Road', Money(amount='60.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.brown, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Baltic Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.brown, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Income Tax', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.tax, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Income Tax', Money(amount='0.00', currency='USD'), MonopolySquareStyle.tax, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
        dicProps['uk'].append(Square('Kings Cross Station', Money(amount='200.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Reading Railroad', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('The Angel Islington', Money(amount='100.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.lightblue, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Oriental Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.lightblue, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Chance', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.chance, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Chance', Money(amount='0.00', currency='USD'), MonopolySquareStyle.chance, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Euston Road', Money(amount='100.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.lightblue, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Vermont Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.lightblue, MonopolyPropertySquareSide.first))
dicProps['uk'].append(Square('Pentonville Road', Money(amount='120.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.lightblue, MonopolyPropertySquareSide.first))
dicProps['us'].append(Square('Connecticut Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.lightblue, MonopolyPropertySquareSide.first))
#Second Side ==========================================================================================================================
dicProps['uk'].append(Square('Jail', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.jail, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('Jail', Money(amount='0.00', currency='USD'), MonopolySquareStyle.jail, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Pall Mall', Money(amount='140.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.pink, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('St Charles Place', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.pink, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Electricity Company', Money(amount='150.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.utilities, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('Electricity Company', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.utilities, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Whitehall', Money(amount='140.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.pink, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('States Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.pink, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Northumberland Avenue', Money(amount='160.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.pink, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('Virginia Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.pink, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Marylebone Station', Money(amount='200.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('Pennsylvania Railroad', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Bow Street', Money(amount='180.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.orange, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('St James Place', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.orange, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Community Chest', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.communitychest, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('Community Chest', Money(amount='0.00', currency='USD'), MonopolySquareStyle.communitychest, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Marlborough Street', Money(amount='180.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.orange, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('Tennessee Place', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.orange, MonopolyPropertySquareSide.second))
dicProps['uk'].append(Square('Vine Street', Money(amount='200.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.orange, MonopolyPropertySquareSide.second))
dicProps['us'].append(Square('New York Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.orange, MonopolyPropertySquareSide.second))
#Third Side ===========================================================================================================================
dicProps['uk'].append(Square('Free Parking', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.freeparking, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Free Parking', Money(amount='0.00', currency='USD'), MonopolySquareStyle.freeparking, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Strand', Money(amount='200.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.red, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Kentucky Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.red, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Chance', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.chance, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Chance', Money(amount='0.00', currency='USD'), MonopolySquareStyle.chance, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Fleet Street', Money(amount='220.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.red, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Indiana Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.red, MonopolyPropertySquareSide.third))
        dicProps['uk'].append(Square('Trafalgar Square', Money(amount='240.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.red, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Illinois Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.red, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Fenchurch St Station', Money(amount='200.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('B&O Railroad', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Leicester Square', Money(amount='260.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.yellow, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Atlantic Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.yellow, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Coventry Street', Money(amount='260.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.yellow, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Ventnor Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.yellow, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Water Works', Money(amount='150.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.utilities, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Water Works', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.utilities, MonopolyPropertySquareSide.third))
dicProps['uk'].append(Square('Piccadilly', Money(amount='280.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.yellow, MonopolyPropertySquareSide.third))
dicProps['us'].append(Square('Marvin Gardens', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.yellow, MonopolyPropertySquareSide.third))
#Fourth Side ==========================================================================================================================
dicProps['uk'].append(Square('Go To Jail', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.gotojail, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Go To Jail', Money(amount='0.00', currency='USD'), MonopolySquareStyle.gotojail, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Regent Street', Money(amount='300.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.green, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Pacific Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.green, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Oxford Street', Money(amount='300.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.green, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('North Carolina Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.green, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Community Chest', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.communitychest, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Community Chest', Money(amount='0.00', currency='USD'), MonopolySquareStyle.communitychest, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Bond Street', Money(amount='320.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.green, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Pennsylvania Avenue', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.green, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Liverpool St Station', Money(amount='200.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Short Line', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.transport, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Chance', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.chance, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Chance', Money(amount='0.00', currency='USD'), MonopolySquareStyle.chance, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Park Lane', Money(amount='350.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.darkblue, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Park Place', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.darkblue, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Super Tax', Money(amount='0.00', currency='UKP'), MonopolySquareStyle.tax, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Super Tax', Money(amount='0.00', currency='USD'), MonopolySquareStyle.tax, MonopolyPropertyStyle.misc, MonopolyPropertySquareSide.fourth))
dicProps['uk'].append(Square('Mayfair', Money(amount='400.00', currency='UKP'), MonopolySquareStyle.property, MonopolyPropertyStyle.darkblue, MonopolyPropertySquareSide.fourth))
dicProps['us'].append(Square('Boardwalk', Money(amount='20000.00', currency='USD'), MonopolySquareStyle.property, MonopolyPropertyStyle.darkblue, MonopolyPropertySquareSide.fourth))
return dicProps[board_style.name]
def throw_dice(self):
dice1 = randint(1, 6)
dice2 = randint(1, 6)
return {'dotcnt': dice1 + dice2, 'wasdouble' : (dice1 == dice2)}
@property
def money_in_bank(self):
return self.__money_in_bank
@money_in_bank.setter
def money_in_bank(self, value):
self.__money_in_bank = value
@property
def next_player_idx(self):
return self.__next_player_idx % len(self.lst_of_players)
@next_player_idx.setter
def next_player_idx(self, value):
self.__next_player_idx = value
@property
def current_square(self):
return self.board[self.current_player.position_on_board]
@property
def current_player(self):
return self.lst_of_players[self.next_player_idx]
def current_player_is_on_property_square(self, curr_player):
#current_square = self.board[curr_player.position_on_board]
return self.current_square.square_style == MonopolySquareStyle.property
    def current_square_is_owned_by_someone_else(self, curr_player):
        #current_square = self.board[curr_player.position_on_board]
        #import pdb;pdb.set_trace()
        if self.current_square.ownedby is None:
            return False
        #The square is owned, but it only counts as "someone else's" if the
        #owner is not the current player.
        return self.current_square.ownedby is not curr_player
'''
TODO
Either put a tag on each Square to say who owns it
...or...
Put a list of Squares on each Player and add a Square
to that list when the Player buys it
'''
def play_a_turn(self):
'''
TODO Check we're going through players property
'''
curr_player = self.lst_of_players[self.next_player_idx]
        while True:
print("")
print("Next player about to play is {0} ".format(curr_player))
#Throw
dic_dice_throw = self.throw_dice()
#Move the next player
curr_player.move(dic_dice_throw['dotcnt'])
#
bln_diag_need_stop = False
if self.current_player_is_on_property_square(curr_player):
if self.current_square_is_owned_by_someone_else(curr_player):
curr_player.pay_rent(self.board[curr_player.position_on_board])
bln_diag_need_stop = True
else:
#Potentially buy the property
if curr_player.player_should_buy(self.board[curr_player.position_on_board]):
print("{0} about to buy {1}".format(curr_player, self.board[curr_player.position_on_board]))
self.currentplayer_buy_currentsquare()
#bln_diag_need_stop = True
#Report status
            if not dic_dice_throw['wasdouble']:
print("{0} was thrown".format(dic_dice_throw['dotcnt']))
self.next_player_idx += 1
self.reportStatus()
if bln_diag_need_stop:
#import pdb;pdb.set_trace()
pass
break
else:
print("Double was thrown - {0} was thrown".format(dic_dice_throw['dotcnt']))
self.reportStatus()
if bln_diag_need_stop:
#import pdb;pdb.set_trace()
pass
def currentplayer_buy_currentsquare(self):
#import pdb;pdb.set_trace()
curr_player = self.lst_of_players[self.next_player_idx]
self.money_in_bank += self.current_square.price
curr_player.funds -= self.current_square.price
self.current_square.ownedby = curr_player
def reportStatus(self):
lst_player_hdrs = ['Name', 'Position', 'Funds']
lstData = []
for p in self.lst_of_players:
lstData.append([p.piece_style.name, p.position_on_board, p.funds])
print(tabulate(lstData, lst_player_hdrs, tablefmt="grid"))
print("")
lst_squares = []
for sq in self.board:
owned_by = ""
if sq.ownedby != None:
owned_by = sq.ownedby.piece_style.name
lst_squares.append([sq.name, sq.property_style.name, owned_by])
lst_square_hdrs = ['Name', 'Style', 'Owner']
print(tabulate(lst_squares, lst_square_hdrs, tablefmt="grid"))
class Board:
'''Represents the board of the game being played'''
def __init__(self, ):
pass
class Player:
'''Represents a player of the game'''
def __init__(self, pc_sty, funds, cnt_squares_on_board, site_aq_style, prop_dev_style):
self.piece_style = pc_sty
self.funds = funds
self.site_aq_style = site_aq_style
self.prop_dev_style = prop_dev_style
self.__cnt_squares_on_board = cnt_squares_on_board
self.__position_on_board = 0
def __repr__(self):
return("{0} at position {1}".format(self.piece_style.name , self.position_on_board))
@property
def position_on_board(self):
return self.__position_on_board % self.__cnt_squares_on_board
@position_on_board.setter
def position_on_board(self, value):
self.__position_on_board = value
def move(self, squares_to_move):
self.__position_on_board += squares_to_move
def player_should_buy(self, current_square):
'''
The assumption here is that the current `Square` is
a 'Property' and is available to buy
'''
if self.site_aq_style == MonopolyPropertySiteAcquistionStyle.random:
return self.__player_should_buy_random(current_square)
else:
raise Exception("Only 'MonopolyPropertySiteAcquistionStyle.random' is supported in this version")
def __player_should_buy_random(self, current_square):
'''
The assumption here is that the current `Square` is
a 'Property' and is available to buy
'''
#Does the Player have the money to buy it ?
if self.funds > current_square.price:
#Throw a coin to determine if they should buy it
return choice([True, False])
else:
return False
def is_on_property_square(self, current_square):
pass
def must_pay_rent(self):
pass
def pay_rent(self, current_square):
'''
        To start with (and to keep it simple) the rent is 10%
        of the price
        '''
        #import pdb;pdb.set_trace()
        getcontext().prec = 6
        rent = current_square.price * Decimal('0.1')
owner = current_square.ownedby
self.funds = self.funds - rent
print("{0} about to pay rent, {1}, to {2}".format(self, rent, owner))
if self.funds < MONEYZEROFUNDS:
raise Exception("{0} has too few funds with which to pay rent to {1}".format(self, owner))
else:
owner.funds = owner.funds + rent
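    #Worked example (illustrative): landing on a square priced at 200.00 UKP
    #charges this player 20.00 UKP in rent (10% of the price), which is then
    #credited to the owning player's funds.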
class Players:
'''Represents all players of the game'''
def __init__(self, cnt):
self.lst_of_players = []
class Place:
'''Represents a position of the game'''
def __init__(self):
pass
class Square(Place):
'''
Represents a position for a game
where positions are 'squares'
(such as Monopoly)
'''
def __init__(self, name, price, square_style, property_style, square_side ):
self.name = name
self.price = price
self.square_style = square_style
self.property_style = property_style
self.square_side = square_side
        self.mortgaged = False
self.ownedby = None
def __repr__(self):
return "{0} - Price {1} ({2} / {3})".format(self.name, self.price, self.square_style.name, self.property_style.name)
class Piece():
'''
Represents the token belonging
to a given `Player`
'''
def __init__(self):
pass
|
|
from __future__ import unicode_literals
from datetime import datetime
from django.contrib.auth import get_user_model
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from django_core.db.models import AbstractTokenModel
from django_core.db.models.mixins.base import AbstractBaseModel
from django_core.utils.list_utils import make_obj_list
from .constants import Status
from .db.models import ShareManager
from django.conf import settings
class AbstractShare(AbstractTokenModel, AbstractBaseModel):
"""Abstract Base share object represents basic shared information for a
specific user sharing an object.
    It's highly recommended that the implementing class puts an index on one of
the two options:
* object_id
* (object_id, content_type)
Fields:
* for_user: the user the object is shared with. This is optional since the
user might not exist yet.
* email: email of the user who the share was sent to if user is unknown.
* first_name: first name of the person invited
* last_name: last name of the person invited
* last_sent: date time the share was last sent.
* message: message sent to user in email.
* status: status of the share (PENDING, ACCEPTED, DECLINED, etc)
    * response_dttm: the datetime the share was responded to.
* content_type: the content type of the generic shared object
* object_id: the object id of the shared object
* shared_object: the object being shared.
* token: unique share token.
"""
for_user = models.ForeignKey(settings.AUTH_USER_MODEL,
blank=True,
null=True,
related_name='for_user+')
email = models.EmailField(blank=True, null=True, db_index=True,
help_text=_('Email address of the person you '
'want to share with.'))
first_name = models.CharField(max_length=100, blank=True, null=True)
last_name = models.CharField(max_length=100, blank=True, null=True)
last_sent = models.DateTimeField(default=datetime.utcnow)
message = models.TextField(blank=True, null=True)
status = models.CharField(max_length=25,
default=Status.PENDING,
choices=Status.CHOICES)
response_dttm = models.DateTimeField(blank=True, null=True)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
shared_object = generic.GenericForeignKey('content_type', 'object_id')
objects = ShareManager()
class Meta:
abstract = True
@classmethod
def save_prep(cls, instance_or_instances):
"""Preprocess the object before the object is saved. This
automatically gets called when the save method gets called.
"""
instances = make_obj_list(instance_or_instances)
for instance in instances:
if not instance.is_pending() and not instance.response_dttm:
instance.response_dttm = datetime.utcnow()
return super(AbstractShare, cls).save_prep(
instance_or_instances=instances)
def is_accepted(self):
"""Boolean indicating if the share is accepted."""
return self.status == Status.ACCEPTED
def is_pending(self):
"""Boolean indicating if the share is pending."""
return self.status == Status.PENDING
def is_declined(self):
return self.status == Status.DECLINED
def is_inactive(self):
return self.status == Status.INACTIVE
def is_deleted(self):
return self.status == Status.DELETED
def accept(self, **kwargs):
"""Accept a share by updating the status to accepted.
        :param kwargs: additional fields that need to be updated when the
            share is accepted.
"""
self.status = Status.ACCEPTED
self.response_dttm = datetime.utcnow()
for attr, value in kwargs.items():
setattr(self, attr, value)
return self.save()
def decline(self, **kwargs):
"""Accept a share by updating the status to accepted.
:param kwargs: additional fields that needs to be updated when the
field is accepted.
"""
self.status = Status.DECLINED
self.response_dttm = datetime.utcnow()
for attr, value in kwargs.items():
setattr(self, attr, value)
return self.save()
def inactivate(self, **kwargs):
"""Inactivate a share."""
self.status = Status.INACTIVE
self.response_dttm = datetime.utcnow()
for attr, value in kwargs.items():
setattr(self, attr, value)
return self.save()
def copy(self, exclude_fields=None, **override_fields):
"""Returns an unsaved copy of the object minus any fields included in
`exclude_fields`.
:param exclude_fields: fields to exclude from the copy. They will
            fall back to the field default if one is given, or to None otherwise.
:param override_fields: kwargs with fields to override. The key is the
field name, the value is the value to set the copied object to.
"""
if exclude_fields is None:
exclude_fields = []
if 'token' not in exclude_fields:
# Token should be unique thus removed when making a copy of a share
# object.
exclude_fields.append('token')
return super(AbstractShare, self).copy(exclude_fields=exclude_fields,
**override_fields)
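    # Illustrative usage (field values are examples only):
    #   draft = share.copy(exclude_fields=['message'], status=Status.PENDING)
    #   draft.save()  # copy() returns the object unsaved, so save it explicitly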
def get_full_name(self):
"""Gets the full name of the person the share is for. If it's a known
user (i.e. "for_user" attr is set) then the name will be pulled off
the user object.
"""
if self.for_user:
first_name = self.for_user.first_name
last_name = self.for_user.last_name
else:
first_name = self.first_name
last_name = self.last_name
return u' '.join([first_name, last_name]).strip()
def get_first_name(self):
"""Gets the first name of the person the share is for. If it's a known
user (i.e. "for_user" attr is set) then the name will be pulled off
the user object.
"""
if self.for_user:
return self.for_user.first_name
return self.first_name
def get_last_name(self):
"""Gets the last name of the person the share is for. If it's a known
user (i.e. "for_user" attr is set) then the name will be pulled off
the user object.
"""
if self.for_user:
return self.for_user.last_name
return self.last_name
def get_email(self):
"""Gets the email address for the person the share is for. It's it's
a known user (i.e. "for_user" attr is set) then the email will be
pulled off the user object.
"""
if self.for_user:
return self.for_user.email
return self.email
@python_2_unicode_compatible
class Share(AbstractShare):
"""The implementation for a shared object."""
class Meta:
ordering = ('-id',)
# Make sure you can only have 1 share per user per shared_object
unique_together = ('content_type', 'object_id', 'for_user',)
index_together = [('content_type', 'object_id')]
def __str__(self):
return str(self.id)
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from novaclient.openstack.common.gettextutils import _
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
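# Illustrative usage:
#   int_from_bool_as_string('True')  # -> 1
#   int_from_bool_as_string('no')    # -> 0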
def bool_from_string(subject, strict=False, default=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return default
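# Illustrative behaviour (inputs follow TRUE_STRINGS/FALSE_STRINGS above):
#   bool_from_string('YES')                  # -> True
#   bool_from_string('banana')               # -> False (the default)
#   bool_from_string('banana', strict=True)  # raises ValueError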
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
"""Converts a string into an float representation of bytes.
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'
:param text: String input for bytes size conversion.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes. (default: decimal)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
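# Illustrative conversions:
#   string_to_bytes('1KB')                      # -> 1024.0 (IEC, bytes)
#   string_to_bytes('1kb', unit_system='SI')    # -> 125.0  (1000 bits / 8)
#   string_to_bytes('1.5GiB', return_int=True)  # -> 1610612736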
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
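# Illustrative usage:
#   to_slug('Hello, World!')     # -> 'hello-world'
#   to_slug(' OpenStack  Nova ') # -> 'openstack-nova'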
|
|
from __future__ import division
import json
import os
import pickle
import pprint
from random import shuffle
import numpy
import patterny.synthesize.topic as _tsynthesize
from patterny.db import dao
from patterny.config import Config
from patterny.semafor.adapter import SemaforClient
from test_main import PatternyTest
from utils import SCRIPT_FOLDER, KDE_DATASET, ONE_VS_REST_FOLDER
from patterny.ml.tmodel import TopicModel
from time import time
import patterny.ml.similarity as _similarity
from patterny.recommender.analysis import AnalysisRecommendation
from patterny.analysis.summary import ProblemAnalysisTextRankSummary
from patterny.analysis.analyzer import BugAnalyses
import multiprocessing
# Global variables
n_topics = 25 # should be 100
n_top_words = 25
n_samples = None
n_features = 1500
iterations = 500
k = 3
delta = 0.4
def metrics_for(data):
# TODO: build bug analysis per bug ID
bug_analyses_file = os.path.join(SCRIPT_FOLDER, 'no_commit_msg_summarization.sav')
bug_analyses = pickle.load(open(bug_analyses_file, 'rb'))
bug_id = data['bug_id']
threshold = data['threshold']
# TODO: IDs in texperiment
if bug_id not in [244091, 158633, 269619, 300951]:
return
t = time()
    print '\n[{}] Processing :: {}'.format(threshold, bug_id)
print "---------------------------------------------------------------------"
current = [bug_id]
    X_summarized_bugs = bug_analyses.filter([bug for bug in bug_analyses.ids if bug not in current])
    y_summarized_bugs = bug_analyses.filter([bug for bug in bug_analyses.ids if bug in current])
tmodel = self.tmodel_for(X_summarized_bugs, bug_id, i)
problem_similarity_vector = _similarity.ProblemSimilarity()
problem_similarity_vector.build(X_summarized_bugs)
recommender = AnalysisRecommendation(
k, delta, n_topics,
tmodel,
problem_similarity_vector,
analysis_similarity_map
)
# # TODO: tmodel and check the topic of each one of the ids
j_precision = []
j_coverage = []
for j in xrange(0, len(y_summarized_bugs.ids)):
try:
bug_id = y_summarized_bugs.ids[j]
            print '\n[{}] Processing :: p{} :: {}'.format(threshold, j, bug_id)
problem = y_summarized_bugs.problems[j]
analysis = y_summarized_bugs.analyses[j]
            bugs_with_similar_problems = recommender.bugs_with_similar_problems(
                bug_id, problem, threshold)
topics_for_similar_bugs = recommender.topics_for_similar_bugs(bugs_with_similar_problems)
top_topics, topic_indexes = recommender.top_topic_index_topic(topics_for_similar_bugs,
bugs_with_similar_problems)
precision, likelihood = recommender.metrics_for_top_topic(bug_id, analysis, top_topics)
j_precision.append(precision)
precision, likelihood = recommender.metrics_for_n_topics(bug_id, analysis, topic_indexes)
except Exception as ex:
print '>> Error on :: p{} :: {}'.format(j, ex.message)
    precision = None
    if j_precision:
        precision = numpy.average(j_precision)
        print '>> precision :: {:4.4f}'.format(precision)
    print
    print "bug %d done in :: %0.3fs." % (bug_id, time() - t)
    print
    print "---------------------------------------------------------------------"
    return precision
def remap_keys(mapping):
return [{'key': k, 'value': v} for k, v in mapping.iteritems()]
class TestProblemAnalysis(PatternyTest):
def setup(self):
self.pp = pprint.PrettyPrinter(indent=2)
self.base_url = 'https://bugs.kde.org/show_bug.cgi?id={}'
with open(KDE_DATASET) as f:
self.config = Config(f)
self.client = SemaforClient(self.config)
def teardown(self):
pass
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
@staticmethod
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
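    # Illustrative usage:
    #   list(TestProblemAnalysis.chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]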
@staticmethod
def randomize(bugs):
result = []
index_shuf = range(len(bugs))
shuffle(index_shuf)
for idx in index_shuf:
result.append(bugs[idx])
return result
@staticmethod
def X_y(bugs, i):
test = list(bugs[i])
train = []
for j in xrange(0, len(bugs)):
if j != i:
train += bugs[j]
return train, test
def test_script(self):
# Uncomment if you want to rebuild the bugs' summaries
# bug_analyses_file = os.path.join(SCRIPT_FOLDER, 'no_commit_msg_summarization.sav')
# pa_summary = ProblemAnalysisTextRankSummary(self.config)
# pa_summary.run(filter_empty_summaries=True)
# bug_analyses = BugAnalyses(pa_summary.bugs)
# pickle.dump(bug_analyses, open(bug_analyses_file, 'wb'))
bug_analyses_file = os.path.join(SCRIPT_FOLDER, 'no_commit_msg_summarization.sav')
bug_analyses = pickle.load(open(bug_analyses_file, 'rb'))
# for idx, id in enumerate(bug_analyses.ids):
# if id in _bug_analyses.bugs:
# problem = _bug_analyses.bugs[id]['problem']
# bug_analyses.problems[idx] = problem
# pickle.dump(bug_analyses, open(bug_analyses_file, 'wb'))
# TODO: look how to work with this guy later on
sentences_similarity_file = os.path.join(SCRIPT_FOLDER, 'analysis_similarity_file.json')
with open(sentences_similarity_file, 'rb') as data:
aux = json.load(data)
analysis_similarity_map = {}
for r in aux:
key = (r['key'][0], r['key'][1])
value = r['value']
analysis_similarity_map[key] = value
vector_similarity_file = os.path.join(SCRIPT_FOLDER, 'problem_vector_similarity_map.json')
with open(vector_similarity_file, 'rb') as data:
aux = json.load(data)
vector_similarity_map = {}
for r in aux:
key = (r['key'][0], r['key'][1])
value = r['value']
vector_similarity_map[key] = value
_similarity.vector_similarity_map = vector_similarity_map
# random_bug_file = os.path.join(SCRIPT_FOLDER, 'random_bug_list.sav')
# bugs = pickle.load(open(random_bug_file, 'rb'))
max_k = 10
bug_ids = bug_analyses.ids
# bug_ids = [b for b in bug_ids if b not in no_analysis]
# bugs = self.randomize(bug_ids)
# pickle.dump(bugs, open(random_bug_file, 'wb'))
# bugs = pickle.load(open(random_bug_file, 'rb'))
# print bugs[0:5]
#
# bugs_per_chunk = 43
# chunks = list(self.chunks(bugs, bugs_per_chunk))
t0 = time()
# TODO: just to get the sample of recommendations to the paper
min_threshold = 0.20
max_threshold = 0.20
threshold_increment = 0.02
self.csv_results = [
['threshold', 'n_topics', 'precision', 'coverage']
]
threshold = min_threshold
while threshold <= max_threshold:
self.recommendations_for(bug_analyses, bug_ids, analysis_similarity_map,
problem_similarity_threshold=threshold)
threshold += threshold_increment
# TODO: uncomment as soon as the big bad script finishes
# with open(sentences_similarity_file, 'wb') as outfile:
# json.dump(remap_keys(sentences_similarity_map), outfile, indent=4, sort_keys=True)
# with open(vector_similarity_file, 'wb') as outfile:
# json.dump(remap_keys(vector_similarity_map), outfile, indent=4, sort_keys=True)
print "---------------------------------------------------------------------"
print "---------------------------------------------------------------------"
print "done in %0.3fs." % ((time() - t0) / 60.0)
print
print
print
for i, result in enumerate(self.csv_results):
if i == 0:
print '{},{},{},{}'.format(
result[0], result[1], result[2], result[3]
)
else:
print '{:2.5f},{},{:2.5f},{:2.5f}'.format(
result[0], result[1], result[2], result[3]
)
def recommendations_for(self, bug_analyses, bug_ids, analysis_similarity_map, problem_similarity_threshold=0.20):
pool_size = multiprocessing.cpu_count() * 2
pool = multiprocessing.Pool(processes=pool_size)
self.zero_precision = []
self.recommended = []
max_k = 10
i_precision = []
recommendation_count = 0
total_count = 0
metrics = pool.map(metrics_for, input)
pool.close()
pool.join()
# if recommendation_count >= 2:
# break
coverage = float(recommendation_count) / total_count if total_count else 0.0
print '>> coverage :: {:4.4f}'.format(coverage)
print
print i_precision
if i_precision:
precision = numpy.average(i_precision)
print '>> precision :: {:4.4f}'.format(precision)
else:
precision = 0.0
# print '\nPARTIAL\n'
# print [problem_similarity_threshold, n_topics, precision, coverage]
self.csv_results.append(
[problem_similarity_threshold, n_topics, precision, coverage]
)
def tmodel_for(self, X_summarized_bugs, bug_id, i):
tmodel_file = os.path.join(ONE_VS_REST_FOLDER, 'tmodel_k{}_bug_{}.sav'.format(n_topics, bug_id))
if not os.path.exists(tmodel_file):
print "Building LDA model for chunk {}".format(i)
tmodel = TopicModel(n_samples=n_samples, n_features=n_features, n_topics=n_topics,
iterations=iterations,
n_top_words=n_top_words, threshold=0.3)
tmodel.build(X_summarized_bugs.analyses, debug=True, analyses=X_summarized_bugs)
pickle.dump(tmodel, open(tmodel_file, 'wb'))
tmodel = pickle.load(open(tmodel_file, 'rb'))
return tmodel
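# --- Hedged sketch (not part of the original script) ---
# test_script() above rebuilds the tuple-keyed similarity maps from JSON
# records of the form {'key': [a, b], 'value': score}, and remap_keys()
# performs the inverse flattening before dumping. The two helpers below just
# package that round-trip; their names are illustrative, not from the
# original code.
def load_similarity_map(path):
    with open(path, 'rb') as handle:
        return dict(((r['key'][0], r['key'][1]), r['value'])
                    for r in json.load(handle))
def dump_similarity_map(mapping, path):
    with open(path, 'wb') as outfile:
        json.dump(remap_keys(mapping), outfile, indent=4, sort_keys=True)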
|
|
from copy import deepcopy
import pytest
from mock import MagicMock
from switchboard.module import SwitchboardModule, ModuleError
from switchboard.device import RESTDevice, get_device_suffix
def set_value_callback(device, value):
set_value_callback.values[device.name] = value
set_value_callback.values = {}
def dic(name):
''' Creates the json device definitions based on the device name '''
d = { 'name': name }
d['readable'] = 'i' in get_device_suffix(name)
d['writeable'] = 'o' in get_device_suffix(name)
return d
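# Illustrative note (not from the original tests): get_device_suffix() is
# assumed to return the part after the dot, so dic('input1.io') should yield
# {'name': 'input1.io', 'readable': True, 'writeable': True}, while
# dic('output1.o') would be write-only and dic('input2.i') read-only.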
class ModuleTestEnv:
def __init__(self):
self.module_class = self.DefaultModule.module_class
self.module_class.enabled = True
self.device_list = {
'input1.io': RESTDevice(dic('input1.io'), 'http://url', set_value_callback),
'input2.i': RESTDevice(dic('input2.i'), 'http://url', set_value_callback),
'output1.o': RESTDevice(dic('output1.o'), 'http://url', set_value_callback),
'output2.io': RESTDevice(dic('output2.io'), 'http://url', set_value_callback)
}
@SwitchboardModule(
inputs = [ 'input1.io', 'input2.i' ],
outputs = { 'output1.o': 1, 'output2.io': None })
def DefaultModule(self, in1, in2, out1, out2):
out1.set_value(in1.get_value() - in2.get_value())
out2.set_value(in1.get_value() + in2.get_value())
def test_class_module_init():
''' Make sure the decorator is doing everything it's supposed to '''
test_env = ModuleTestEnv()
module = test_env.module_class
assert module.inputs == [ 'input1.io', 'input2.i' ]
assert module.outputs == { 'output1.o': 1, 'output2.io': None }
assert module.name == 'DefaultModule'
assert module.is_class_method == True
def test_standalone_module_init():
''' Make sure the decorator is doing everything it's supposed to '''
@SwitchboardModule(
inputs = [ 'input1.io', 'input2.i' ],
outputs = { 'output1.o': 1, 'output2.io': 2 },
static_variables = { 'abc': 123 } )
def StandaloneModule(in1, in2, out1, out2):
pass
module = StandaloneModule.module_class
assert module.inputs == [ 'input1.io', 'input2.i' ]
assert module.outputs == { 'output1.o': 1, 'output2.io': 2 }
assert module.name == 'StandaloneModule'
assert hasattr(StandaloneModule, 'abc')
assert StandaloneModule.abc == 123
assert module.is_class_method == False
def test_switchboard_class_module_with_statics_error():
''' A class switchboard module is not allowed to have statics '''
with pytest.raises(ModuleError):
class InvalidModule:
@SwitchboardModule(inputs = [], outputs = [], static_variables = { 'abc': 101 })
def StandaloneModule(self):
pass
def test_create_argument_list_success():
test_env = ModuleTestEnv()
test_env.module_class.create_argument_list(test_env.device_list)
assert test_env.module_class.enabled == True
assert test_env.module_class.error == None
# We should be able to call create_argument_list multiple times
# without errors
test_env.module_class.create_argument_list(test_env.device_list)
assert test_env.module_class.enabled == True
assert test_env.module_class.error == None
def test_create_argument_list_missing_input_device_error():
test_env = ModuleTestEnv()
del test_env.device_list['input1.io']
with pytest.raises(ModuleError):
test_env.module_class.create_argument_list(test_env.device_list)
assert test_env.module_class.enabled == False
assert test_env.module_class.error != None
def test_create_argument_list_missing_output_device_error():
test_env = ModuleTestEnv()
del test_env.device_list['output2.io']
with pytest.raises(ModuleError):
test_env.module_class.create_argument_list(test_env.device_list)
assert test_env.module_class.enabled == False
assert test_env.module_class.error != None
def test_create_argument_list_not_an_input_error():
@SwitchboardModule(inputs = [ 'output1.o' ])
def ClashingTestModule(input):
pass
module_class = ClashingTestModule.module_class
with pytest.raises(ModuleError):
module_class.create_argument_list(ModuleTestEnv().device_list)
assert module_class.enabled == False
assert module_class.error != None
def test_create_argument_list_not_an_output_error():
@SwitchboardModule(outputs = { 'input2.i': 1 })
def ClashingTestModule(out):
pass
module_class = ClashingTestModule.module_class
with pytest.raises(ModuleError):
module_class.create_argument_list(ModuleTestEnv().device_list)
assert module_class.enabled == False
assert module_class.error != None
def test_create_argument_list_multiple_drivers_error():
test_env = ModuleTestEnv()
test_env.module_class.create_argument_list(test_env.device_list)
@SwitchboardModule(outputs = { 'output1.o': 1 })
def ClashingTestModule(out):
pass
# Because ClashingTestModule also drives output1.o, creating the
# argument list should cause an error
module_class = ClashingTestModule.module_class
module_class.enabled = True
with pytest.raises(ModuleError):
module_class.create_argument_list(test_env.device_list)
assert test_env.module_class.enabled == True
assert module_class.enabled == False
assert module_class.error != None
def test_check_module_io_error():
test_env = ModuleTestEnv()
test_env.module_class.create_argument_list(test_env.device_list)
assert test_env.module_class.check_module_io_error() == False
test_env.device_list['output1.o'].error = 'Some error'
assert test_env.module_class.check_module_io_error() == True
assert test_env.module_class.error != None
test_env.device_list['output1.o'].error = 'Some other error'
assert test_env.module_class.check_module_io_error() == True
assert test_env.module_class.error != None
test_env.device_list['output1.o'].error = None
assert test_env.module_class.check_module_io_error() == False
assert test_env.module_class.error == None
def test_call_switchboard_class_module():
test_env = ModuleTestEnv()
test_env.module_class.create_argument_list(test_env.device_list)
test_env.device_list['input1.io'].update_value(10)
test_env.device_list['input2.i'].update_value(5)
test_env.DefaultModule()
assert set_value_callback.values['output1.o'] == 5
assert set_value_callback.values['output2.io'] == 15
def test_call_switchboard_io_error():
test_env = ModuleTestEnv()
test_env.module_class.create_argument_list(test_env.device_list)
test_env.device_list['input1.io'].update_value(10)
test_env.device_list['input2.i'].update_value(5)
test_env.DefaultModule()
assert set_value_callback.values['output1.o'] == 5
assert set_value_callback.values['output2.io'] == 15
test_env.device_list['input2.i'].error = 'Some error has occurred'
test_env.DefaultModule()
assert set_value_callback.values['output1.o'] == 1 # Set to error value
assert set_value_callback.values['output2.io'] == 15 # Unchanged as error value is None
test_env.device_list['input2.i'].error = None
test_env.DefaultModule()
assert set_value_callback.values['output1.o'] == 5
assert set_value_callback.values['output2.io'] == 15
def test_call_switchboard_standalone_module():
test_env = ModuleTestEnv()
@SwitchboardModule(
inputs = [ 'input1.io', 'input2.i' ],
outputs = { 'output1.o': 1 },
static_variables = { 'abc': 101 })
def StandaloneModule(in1, in2, out):
out.set_value(in1.get_value() + in2.get_value() + StandaloneModule.abc)
module_class = StandaloneModule.module_class
module_class.create_argument_list(test_env.device_list)
module_class.enabled = True
test_env.device_list['input1.io'].update_value(10)
test_env.device_list['input2.i'].update_value(5)
StandaloneModule()
assert set_value_callback.values['output1.o'] == 10 + 5 + 101
|
|
"""Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.misc import comb
from itertools import combinations
from sklearn.utils.fixes import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
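# --- Hedged example (illustrative, not one of the original tests) ---
# A minimal sketch of how MockClassifier's dummy fit parameters can be routed
# through cross-validation via fit_params; the helper name and the parameter
# values below are arbitrary and not part of the original test suite.
def _example_mock_classifier_fit_params():
    clf = MockClassifier()
    cross_val_score(clf, X, y,
                    fit_params={'dummy_int': 42,
                                'dummy_str': 'forty-two',
                                'dummy_obj': object()})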
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = number of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, "
"train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
assert_equal(np.asarray(test).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
n_samples = _num_samples(X)
# Check that all the samples appear at least once in a test fold
if expected_n_splits is not None:
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
else:
expected_n_splits = cv.get_n_splits(X, y, groups)
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_splits)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
# Check that errors are raised if all n_groups for individual
# classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in (False, True):
for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
# Check that when the shuffle is True multiple split calls produce the
# same split when random_state is set
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
kf = KFold(3, shuffle=True)
skf = StratifiedKFold(3, shuffle=True)
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
try:
np.testing.assert_equal(list(cv.split(*data)),
list(cv.split(*data)))
except AssertionError:
pass
else:
raise AssertionError("The splits for data, %s, are same even "
"when random state is not set" % data)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert_not_equal(set(test0), set(test1))
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.93) than the non-shuffling
# variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.92, mean_score)
assert_greater(mean_score, 0.80)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.93, mean_score)
assert_greater(mean_score, 0.80)
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
for typ in six.integer_types:
ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there are not enough samples
assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)])
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep class proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert_true(prob > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits_actual, n_splits)
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert_equal(len(train), n_train)
assert_equal(len(test), n_test)
assert_equal(len(set(train).intersection(test)), 0)
group_counts = np.unique(groups)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(n_train + n_test, len(groups))
assert_equal(len(group_counts), 2)
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(iter(sss.split(X=X, y=y)))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = PredefinedSplit(folds)
# n_splits is simply the no of unique folds
assert_equal(len(np.unique(folds)), ps.get_n_splits())
for train_ind, test_ind in ps.split():
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
groups = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4])]
for l in groups:
X = y = np.ones(len(l))
n_splits = 6
test_size = 1./3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(slo.get_n_splits(X, y, groups=l), n_splits)
l_unique = np.unique(l)
for train, test in slo.split(X, y, groups=l):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert_false(np.any(np.in1d(l[train], l_test_unique)))
assert_false(np.any(np.in1d(l[test], l_train_unique)))
# Second test: train and test add up to all the data
assert_equal(l[train].size + l[test].size, l.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert_true(abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1)
assert_true(abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
# n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
assert_equal(3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y, groups))
# n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y, groups))
def test_train_test_split_errors():
assert_raises(ValueError, train_test_split)
assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
@ignore_warnings
def test_train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert_true(isinstance(X_train, csr_matrix))
assert_true(isinstance(X_test, csr_matrix))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = train_test_split(X_df)
def test_shufflesplit_errors():
# When the {test|train}_size is a float/invalid, error is raised at init
assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
assert_raises(ValueError, ShuffleSplit, test_size=2.0)
assert_raises(ValueError, ShuffleSplit, test_size=1.0)
assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
assert_raises(ValueError, ShuffleSplit, train_size=1j)
# When the {test|train}_size is an int, validation is based on the input X
# and happens at split(...)
assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=8,
train_size=3).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
# Check if the old style classes are wrapped to have a split method
X = np.ones(9)
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv1 = check_cv(3, y_multiclass, classifier=True)
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
list(cv2.split()))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv = OldSKF(y_multiclass, n_folds=3)
wrapped_old_skf = _CVIterableWrapper(cv)
# Check if split works correctly
np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
# Check if get_n_splits works correctly
assert_equal(len(cv), wrapped_old_skf.get_n_splits())
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
len(np.unique(groups))
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert_equal(n_splits_actual, tscv.get_n_splits())
assert_equal(n_splits_actual, 2)
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv)
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Digital Ocean Driver
"""
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
class DigitalOceanResponse(JsonResponse):
def parse_error(self):
if self.status == httplib.FOUND and '/api/error' in self.body:
# Hacky, but DigitalOcean error responses are awful
raise InvalidCredsError(self.body)
elif self.status == httplib.UNAUTHORIZED:
body = self.parse_body()
raise InvalidCredsError(body['message'])
class SSHKey(object):
def __init__(self, id, name, pub_key):
self.id = id
self.name = name
self.pub_key = pub_key
def __repr__(self):
return (('<SSHKey: id=%s, name=%s, pub_key=%s>') %
(self.id, self.name, self.pub_key))
class DigitalOceanConnection(ConnectionUserAndKey):
"""
Connection class for the DigitalOcean driver.
"""
host = 'api.digitalocean.com'
responseCls = DigitalOceanResponse
def add_default_params(self, params):
"""
Add parameters that are necessary for every request
This method adds C{client_id} and C{api_key} to
the request.
"""
params['client_id'] = self.user_id
params['api_key'] = self.key
return params
class DigitalOceanNodeDriver(NodeDriver):
"""
DigitalOcean node driver.
"""
connectionCls = DigitalOceanConnection
type = Provider.DIGITAL_OCEAN
name = 'Digital Ocean'
website = 'https://www.digitalocean.com'
NODE_STATE_MAP = {'new': NodeState.PENDING,
'off': NodeState.REBOOTING,
'active': NodeState.RUNNING}
def list_nodes(self):
data = self.connection.request('/droplets').object['droplets']
return list(map(self._to_node, data))
def list_locations(self):
data = self.connection.request('/regions').object['regions']
return list(map(self._to_location, data))
def list_images(self):
data = self.connection.request('/images').object['images']
return list(map(self._to_image, data))
def list_sizes(self):
data = self.connection.request('/sizes').object['sizes']
return list(map(self._to_size, data))
def create_node(self, name, size, image, location, ex_ssh_key_ids=None,
**kwargs):
"""
Create a node.
@keyword ex_ssh_key_ids: A list of ssh key ids which will be added
to the server. (optional)
@type ex_ssh_key_ids: C{list} of C{str}
@return: The newly created node.
@rtype: L{Node}
"""
params = {'name': name, 'size_id': size.id, 'image_id': image.id,
'region_id': location.id}
if ex_ssh_key_ids:
params['ssh_key_ids'] = ','.join(ex_ssh_key_ids)
data = self.connection.request('/droplets/new', params=params).object
return self._to_node(data=data['droplet'])
def reboot_node(self, node):
res = self.connection.request('/droplets/%s/reboot/' % (node.id))
return res.status == httplib.OK
def destroy_node(self, node):
res = self.connection.request('/droplets/%s/destroy/' % (node.id))
return res.status == httplib.OK
def ex_list_ssh_keys(self):
"""
List all the available SSH keys.
@return: Available SSH keys.
@rtype: C{list} of L{SSHKey}
"""
data = self.connection.request('/ssh_keys').object['ssh_keys']
return list(map(self._to_ssh_key, data))
def ex_create_ssh_key(self, name, ssh_key_pub):
"""
Create a new SSH key.
@param name: Key name (required)
@type name: C{str}
@param ssh_key_pub: Valid public key string (required)
@type ssh_key_pub: C{str}
"""
params = {'name': name, 'ssh_pub_key': ssh_key_pub}
data = self.connection.request('/ssh_keys/new/', method='GET',
params=params).object
assert 'ssh_key' in data
return self._to_ssh_key(data=data['ssh_key'])
def ex_destroy_ssh_key(self, key_id):
"""
Delete an existing SSH key.
@param key_id: SSH key id (required)
@type key_id: C{str}
"""
res = self.connection.request('/ssh_keys/%s/destroy/' % (key_id))
return res.status == httplib.OK
def _to_node(self, data):
extra_keys = ['backups_active', 'region_id']
if 'status' in data:
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
else:
state = NodeState.UNKNOWN
if 'ip_address' in data and data['ip_address'] is not None:
public_ips = [data['ip_address']]
else:
public_ips = []
extra = {}
for key in extra_keys:
if key in data:
extra[key] = data[key]
node = Node(id=data['id'], name=data['name'], state=state,
public_ips=public_ips, private_ips=None, extra=extra,
driver=self)
return node
def _to_image(self, data):
extra = {'distribution': data['distribution']}
return NodeImage(id=data['id'], name=data['name'], extra=extra,
driver=self)
def _to_location(self, data):
return NodeLocation(id=data['id'], name=data['name'], country=None,
driver=self)
def _to_size(self, data):
ram = data['name'].lower()
if 'mb' in ram:
ram = int(ram.replace('mb', ''))
elif 'gb' in ram:
ram = int(ram.replace('gb', '')) * 1024
return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0,
bandwidth=0, price=0, driver=self)
def _to_ssh_key(self, data):
return SSHKey(id=data['id'], name=data['name'],
pub_key=data.get('ssh_pub_key', None))
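# --- Hedged usage sketch (not part of the driver) ---
# Illustrative only: with ConnectionUserAndKey the two positional arguments
# end up as client_id and api_key (see add_default_params above), but the
# exact constructor signature is inherited from the libcloud NodeDriver base
# class and may differ between libcloud versions; the credentials below are
# placeholders.
if __name__ == '__main__':
    driver = DigitalOceanNodeDriver('my-client-id', 'my-api-key')
    for node in driver.list_nodes():
        print('%s %s %s' % (node.name, node.state, node.public_ips))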
|
|
#!/usr/bin/env python
#gtkwin32.py
__version__ = '1.01'
import sys
import win32gui
import winxpgui
GWL_WNDPROC = -4
GWL_EXSTYLE = -20
IDI_APPLICATION = 32512
LWA_ALPHA = 0x02
LWA_COLORKEY = 0x01
WM_TASKBARCREATED = win32gui.RegisterWindowMessage('TaskbarCreated')
WM_USER = 1024
WM_TRAYMESSAGE = WM_USER + 20
WS_EX_LAYERED = 0x80000
class GTKWin32Ext:
def __init__(self, gtk_window):
self._window = gtk_window
self._hwnd = gtk_window.window.handle
self._message_map = {}
# Windows transparency is only supported on Windows 2000 and above.
if sys.getwindowsversion()[0] <= 4:
self.alpha = None
else:
self.alpha = 100
self._transparency = False
self.notify_icon = None
# Subclass the window and inject a WNDPROC to process messages.
self._oldwndproc = win32gui.SetWindowLong(self._hwnd, GWL_WNDPROC,
self._wndproc)
gtk_window.connect('unrealize', self.remove)
def add_notify_icon(self, hicon=None, tooltip=None):
""" Creates a notify icon for the gtk window. """
if not self.notify_icon:
if not hicon:
hicon = win32gui.LoadIcon(0, IDI_APPLICATION)
self.notify_icon = NotifyIcon(self._hwnd, hicon, tooltip)
# Re-add the notify icon if the taskbar is restarted.
self.message_map({WM_TASKBARCREATED: self.notify_icon._redraw})
def message_map(self, msg_map={}):
""" Maps message processing to callback functions ala win32gui. """
if msg_map:
if self._message_map:
duplicatekeys = [key for key in msg_map.keys()
if self._message_map.has_key(key)]
for key in duplicatekeys:
new_value = msg_map[key]
if isinstance(new_value, list):
raise TypeError('Dict cannot have list values')
value = self._message_map[key]
if new_value != value:
new_value = [new_value]
if isinstance(value, list):
value += new_value
else:
value = [value] + new_value
msg_map[key] = value
self._message_map.update(msg_map)
def message_unmap(self, msg, callback=None):
if self._message_map.has_key(msg):
if callback:
cblist = self._message_map[msg]
if isinstance(cblist, list):
if not len(cblist) < 2:
for i in range(len(cblist)):
if cblist[i] == callback:
del self._message_map[msg][i]
return
del self._message_map[msg]
def remove_notify_icon(self):
""" Removes the notify icon. """
if self.notify_icon:
self.notify_icon.remove()
self.notify_icon = None
def remove(self, *args):
""" Unloads the extensions. """
self._message_map = {}
self.remove_notify_icon()
self = None
def show_balloon_tooltip(self, title, text, timeout=10,
icon=win32gui.NIIF_NONE):
""" Shows a baloon tooltip. """
if not self.notify_icon:
self.add_notify_icon()
self.notify_icon.show_balloon(title, text, timeout, icon)
def set_alpha(self, alpha=100, colorkey=0, mask=False):
""" Sets the transparency of the window. """
if self.alpha != None:
if not self._transparency:
style = win32gui.GetWindowLong(self._hwnd, GWL_EXSTYLE)
if (style & WS_EX_LAYERED) != WS_EX_LAYERED:
style = style | WS_EX_LAYERED
win32gui.SetWindowLong(self._hwnd, GWL_EXSTYLE, style)
self._transparency = True
if mask and colorkey:
flags = LWA_COLORKEY
else:
flags = LWA_ALPHA
if colorkey:
flags = flags | LWA_COLORKEY
win_alpha = int(float(alpha)/100*255)
winxpgui.SetLayeredWindowAttributes(self._hwnd, colorkey,
win_alpha, flags)
self.alpha = int(alpha)
def _wndproc(self, hwnd, msg, wparam, lparam):
""" A WINDPROC to process window messages. """
if self._message_map.has_key(msg):
callback = self._message_map[msg]
if isinstance(callback, list):
for cb in callback:
apply(cb, (hwnd, msg, wparam, lparam))
else:
apply(callback, (hwnd, msg, wparam, lparam))
return win32gui.CallWindowProc(self._oldwndproc, hwnd, msg, wparam,
lparam)
class NotifyIcon:
def __init__(self, hwnd, hicon, tooltip=None):
self._hwnd = hwnd
self._id = 0
self._flags = win32gui.NIF_MESSAGE | win32gui.NIF_ICON
self._callbackmessage = WM_TRAYMESSAGE
self._hicon = hicon
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, self._get_nid())
if tooltip: self.set_tooltip(tooltip)
def _get_nid(self):
""" Function to initialise & retrieve the NOTIFYICONDATA Structure. """
nid = (self._hwnd, self._id, self._flags, self._callbackmessage,
self._hicon)
nid = list(nid)
if not hasattr(self, '_tip'): self._tip = ''
nid.append(self._tip)
if not hasattr(self, '_info'): self._info = ''
nid.append(self._info)
if not hasattr(self, '_timeout'): self._timeout = 0
nid.append(self._timeout)
if not hasattr(self, '_infotitle'): self._infotitle = ''
nid.append(self._infotitle)
if not hasattr(self, '_infoflags'):self._infoflags = win32gui.NIIF_NONE
nid.append(self._infoflags)
return tuple(nid)
def remove(self):
""" Removes the tray icon. """
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, self._get_nid())
def set_tooltip(self, tooltip):
""" Sets the tray icon tooltip. """
self._flags = self._flags | win32gui.NIF_TIP
self._tip = tooltip
win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, self._get_nid())
def show_balloon(self, title, text, timeout=10, icon=win32gui.NIIF_NONE):
""" Shows a balloon tooltip from the tray icon. """
self._flags = self._flags | win32gui.NIF_INFO
self._infotitle = title
self._info = text
self._timeout = timeout * 1000
self._infoflags = icon
win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, self._get_nid())
    def _redraw(self, *args):
        """ Redraws the tray icon. """
        self.remove()
        win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, self._get_nid())
if __name__ == '__main__':
    # Example of how to use the module.
import gtk
import gobject
import time
WM_LBUTTONUP = 0x0202
WM_RBUTTONUP = 0x0205
class GTKApp:
def __init__(self):
self.main_loop = gobject.MainLoop()
# Create a window with a horizontal scale.
self.wnd = gtk.Window()
self.wnd.set_default_size(640, 480)
self.wnd.set_title('Have fun with the transparency slider')
hscale = gtk.HScale()
hscale.set_digits(0)
hscale.set_increments(1, 10)
hscale.set_range(0, 100)
hscale.set_value(100)
hscale.connect('value_changed', self.set_window_alpha)
self.wnd.add(hscale)
# Note: gtk window must be realized before installing extensions.
self.wnd.realize()
self.wnd.show_all()
self.win32ext = GTKWin32Ext(self.wnd)
self.win32ext.add_notify_icon()
# GTK menus from the notify icon!
menu = gtk.Menu()
            menu_item = gtk.MenuItem('Balloons!')
menu_item.connect_object('activate', self.menu_cb, self.wnd)
menu.append(menu_item)
menu_item = gtk.MenuItem('Fadeout Window')
menu_item.connect('activate', self.fadeoutwindow)
menu.append(menu_item)
menu_item = gtk.MenuItem('Window Disappeared?')
menu_item.connect('activate', self.fadeinwindow)
menu.append(menu_item)
menu.show_all()
self.win32ext.notify_icon.menu = menu
# Set up the callback messages
self.win32ext.message_map({
WM_TRAYMESSAGE: self.on_notifyicon_activity
})
def set_window_alpha(self, hscale):
self.win32ext.set_alpha(hscale.get_value())
def fadeinwindow(self, *args):
while(self.win32ext.alpha < 100):
self.win32ext.set_alpha(self.win32ext.alpha + 1)
time.sleep(0.01)
def fadeoutwindow(self, *args):
while(self.win32ext.alpha != 0):
self.win32ext.set_alpha(self.win32ext.alpha - 1)
time.sleep(0.01)
def menu_cb(self, data):
self.win32ext.show_balloon_tooltip(
'pyGTK Win32 Extensions', 'No more MFC from today!')
def on_notifyicon_activity(self, hwnd, message, wparam, lparam):
if lparam == WM_RBUTTONUP:
self.win32ext.notify_icon.menu.popup(None, None, None, 0, 0)
elif lparam == WM_LBUTTONUP:
self.win32ext.notify_icon.menu.popdown()
def quit(self, *args):
self.win32ext.remove_notify_icon()
gtk.main_quit()
gtkapp = GTKApp()
gtkapp.main_loop.run()
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import logging
from fractions import Fraction
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.ewald import EwaldSummation, EwaldMinimizer
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.transformations.site_transformations import \
PartialRemoveSitesTransformation
from pymatgen.transformations.transformation_abc import AbstractTransformation
"""
This module defines standard transformations which transforms a structure into
another structure. Standard transformations operate in a structure-wide manner,
rather than site-specific manner.
All transformations should inherit the AbstractTransformation ABC.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Sep 23, 2011"
logger = logging.getLogger(__name__)
class RotationTransformation(AbstractTransformation):
"""
The RotationTransformation applies a rotation to a structure.
Args:
axis (3x1 array): Axis of rotation, e.g., [1, 0, 0]
angle (float): Angle to rotate
angle_in_radians (bool): Set to True if angle is supplied in radians.
Else degrees are assumed.
"""
def __init__(self, axis, angle, angle_in_radians=False):
"""
"""
self.axis = axis
self.angle = angle
self.angle_in_radians = angle_in_radians
self._symmop = SymmOp.from_axis_angle_and_translation(
self.axis, self.angle, self.angle_in_radians)
def apply_transformation(self, structure):
s = structure.copy()
s.apply_operation(self._symmop)
return s
def __str__(self):
return "Rotation Transformation about axis " + \
"{} with angle = {:.4f} {}".format(
self.axis, self.angle,
"radians" if self.angle_in_radians else "degrees")
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return RotationTransformation(self.axis, -self.angle,
self.angle_in_radians)
@property
def is_one_to_many(self):
return False
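# Minimal usage sketch (not part of the original module; the toy cubic Si cell
# below is an illustrative assumption): rotate a structure by 90 degrees about
# the c-axis and get a new, rotated Structure back.
def _example_rotation_usage():
    from pymatgen.core.lattice import Lattice
    structure = Structure(Lattice.cubic(3.0), ["Si"], [[0, 0, 0]])
    rot = RotationTransformation(axis=[0, 0, 1], angle=90)
    return rot.apply_transformation(structure)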
class OxidationStateDecorationTransformation(AbstractTransformation):
"""
This transformation decorates a structure with oxidation states.
Args:
oxidation_states (dict): Oxidation states supplied as a dict,
e.g., {"Li":1, "O":-2}
"""
def __init__(self, oxidation_states):
self.oxidation_states = oxidation_states
def apply_transformation(self, structure):
s = structure.copy()
s.add_oxidation_state_by_element(self.oxidation_states)
return s
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class AutoOxiStateDecorationTransformation(AbstractTransformation):
"""
This transformation automatically decorates a structure with oxidation
states using a bond valence approach.
Args:
symm_tol (float): Symmetry tolerance used to determine which sites are
symmetrically equivalent. Set to 0 to turn off symmetry.
max_radius (float): Maximum radius in Angstrom used to find nearest
neighbors.
max_permutations (int): Maximum number of permutations of oxidation
states to test.
distance_scale_factor (float): A scale factor to be applied. This is
useful for scaling distances, esp in the case of
calculation-relaxed structures, which may tend to under (GGA) or
over bind (LDA). The default of 1.015 works for GGA. For
experimental structure, set this to 1.
"""
def __init__(self, symm_tol=0.1, max_radius=4, max_permutations=100000,
distance_scale_factor=1.015):
self.symm_tol = symm_tol
self.max_radius = max_radius
self.max_permutations = max_permutations
self.distance_scale_factor = distance_scale_factor
self.analyzer = BVAnalyzer(symm_tol, max_radius, max_permutations,
distance_scale_factor)
def apply_transformation(self, structure):
return self.analyzer.get_oxi_state_decorated_structure(structure)
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class OxidationStateRemovalTransformation(AbstractTransformation):
"""
This transformation removes oxidation states from a structure.
"""
def __init__(self):
pass
def apply_transformation(self, structure):
s = structure.copy()
s.remove_oxidation_states()
return s
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class SupercellTransformation(AbstractTransformation):
"""
    The SupercellTransformation creates a supercell of a structure.
    Args:
        scaling_matrix: A matrix for transforming the lattice vectors.
            Defaults to the identity matrix. Has to be all integers. e.g.,
            [[2,1,0],[0,3,0],[0,0,1]] generates a new structure with
            lattice vectors a' = 2a + b, b' = 3b, c' = c where a, b, and c
            are the lattice vectors of the original structure.
"""
def __init__(self, scaling_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
self.scaling_matrix = scaling_matrix
@staticmethod
def from_scaling_factors(scale_a=1, scale_b=1, scale_c=1):
"""
Convenience method to get a SupercellTransformation from a simple
series of three numbers for scaling each lattice vector. Equivalent to
calling the normal with [[scale_a, 0, 0], [0, scale_b, 0],
[0, 0, scale_c]]
Args:
scale_a: Scaling factor for lattice direction a. Defaults to 1.
scale_b: Scaling factor for lattice direction b. Defaults to 1.
scale_c: Scaling factor for lattice direction c. Defaults to 1.
Returns:
SupercellTransformation.
"""
return SupercellTransformation([[scale_a, 0, 0], [0, scale_b, 0],
[0, 0, scale_c]])
def apply_transformation(self, structure):
return structure * self.scaling_matrix
def __str__(self):
return "Supercell Transformation with scaling matrix " + \
"{}".format(self.scaling_matrix)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
raise NotImplementedError()
@property
def is_one_to_many(self):
return False
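# Usage sketch (illustrative, not from the original module): the convenience
# constructor below is equivalent to passing the diagonal scaling matrix
# [[2, 0, 0], [0, 2, 0], [0, 0, 1]] to SupercellTransformation directly.
def _example_supercell_usage(structure):
    trans = SupercellTransformation.from_scaling_factors(scale_a=2, scale_b=2,
                                                         scale_c=1)
    return trans.apply_transformation(structure)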
class SubstitutionTransformation(AbstractTransformation):
"""
This transformation substitutes species for one another.
Args:
species_map: A dict or list of tuples containing the species mapping in
string-string pairs. E.g., {"Li":"Na"} or [("Fe2+","Mn2+")].
Multiple substitutions can be done. Overloaded to accept
            sp_and_occu dictionary E.g. {"Si": {"Ge":0.75, "C":0.25}},
which substitutes a single species with multiple species to
generate a disordered structure.
"""
def __init__(self, species_map):
self.species_map = species_map
self._species_map = dict(species_map)
for k, v in self._species_map.items():
if isinstance(v, (tuple, list)):
self._species_map[k] = dict(v)
def apply_transformation(self, structure):
species_map = {}
for k, v in self._species_map.items():
if isinstance(v, dict):
value = {get_el_sp(x): y for x, y in v.items()}
else:
value = get_el_sp(v)
species_map[get_el_sp(k)] = value
s = structure.copy()
s.replace_species(species_map)
return s
def __str__(self):
return "Substitution Transformation :" + \
", ".join([str(k) + "->" + str(v)
for k, v in self._species_map.items()])
def __repr__(self):
return self.__str__()
@property
def inverse(self):
inverse_map = {v: k for k, v in self._species_map.items()}
return SubstitutionTransformation(inverse_map)
@property
def is_one_to_many(self):
return False
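# Usage sketch (the compositions are illustrative assumptions): a plain string
# map swaps one species for another, while a nested dict substitutes a single
# species with a partial-occupancy mixture, producing a disordered structure.
def _example_substitution_usage(structure):
    simple = SubstitutionTransformation({"Li": "Na"})
    disordered = SubstitutionTransformation({"Si": {"Ge": 0.75, "C": 0.25}})
    # Species absent from ``structure`` are simply left unchanged.
    return (simple.apply_transformation(structure),
            disordered.apply_transformation(structure))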
class RemoveSpeciesTransformation(AbstractTransformation):
"""
Remove all occurrences of some species from a structure.
Args:
species_to_remove: List of species to remove. E.g., ["Li", "Mn"]
"""
def __init__(self, species_to_remove):
self.species_to_remove = species_to_remove
def apply_transformation(self, structure):
s = structure.copy()
for sp in self.species_to_remove:
s.remove_species([get_el_sp(sp)])
return s
def __str__(self):
return "Remove Species Transformation :" + \
", ".join(self.species_to_remove)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class PartialRemoveSpecieTransformation(AbstractTransformation):
"""
Remove fraction of specie from a structure.
Requires an oxidation state decorated structure for ewald sum to be
computed.
Given that the solution to selecting the right removals is NP-hard, there
are several algorithms provided with varying degrees of accuracy and speed.
Please see
:class:`pymatgen.transformations.site_transformations.PartialRemoveSitesTransformation`.
Args:
specie_to_remove: Specie to remove. Must have oxidation state E.g.,
"Li+"
fraction_to_remove: Fraction of specie to remove. E.g., 0.5
algo: This parameter allows you to choose the algorithm to perform
ordering. Use one of PartialRemoveSpecieTransformation.ALGO_*
variables to set the algo.
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
ALGO_ENUMERATE = 3
def __init__(self, specie_to_remove, fraction_to_remove, algo=ALGO_FAST):
"""
"""
self.specie_to_remove = specie_to_remove
self.fraction_to_remove = fraction_to_remove
self.algo = algo
def apply_transformation(self, structure, return_ranked_list=False):
"""
Apply the transformation.
Args:
structure: input structure
return_ranked_list (bool/int): Boolean stating whether or not
multiple structures are returned. If return_ranked_list is
an int, that number of structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
sp = get_el_sp(self.specie_to_remove)
specie_indices = [i for i in range(len(structure))
if structure[i].species_and_occu ==
Composition({sp: 1})]
trans = PartialRemoveSitesTransformation([specie_indices],
[self.fraction_to_remove],
algo=self.algo)
return trans.apply_transformation(structure, return_ranked_list)
@property
def is_one_to_many(self):
return True
def __str__(self):
spec_str = ["Species = {}".format(self.specie_to_remove),
"Fraction to remove = {}".format(self.fraction_to_remove),
"ALGO = {}".format(self.algo)]
return "PartialRemoveSpecieTransformation : " + ", ".join(spec_str)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
class OrderDisorderedStructureTransformation(AbstractTransformation):
"""
Order a disordered structure. The disordered structure must be oxidation
state decorated for ewald sum to be computed. No attempt is made to perform
symmetry determination to reduce the number of combinations.
    Hence, attempting to perform ordering on a large number of disordered
    sites may be extremely expensive. The time scales approximately with the
    number of possible combinations. The algorithm can currently compute
    approximately 5,000,000 permutations per minute.
    Also, simple rounding of the occupancies is performed, with no attempt
    made to achieve a target composition. This is usually not a problem for
    most ordering problems, but there can be times where rounding errors may
    result in structures that do not have the desired composition.
    This second step will be implemented in the next iteration of the code.
    If multiple fractions for a single species are found for different sites,
    these will be treated separately if the difference is above a threshold
    tolerance (currently 0.1).
For example, if a fraction of .25 Li is on sites 0,1,2,3 and .5 on sites
4, 5, 6, 7 then 1 site from [0,1,2,3] will be filled and 2 sites from [4,5,6,7]
will be filled, even though a lower energy combination might be found by
putting all lithium in sites [4,5,6,7].
USE WITH CARE.
Args:
algo (int): Algorithm to use.
symmetrized_structures (bool): Whether the input structures are
instances of SymmetrizedStructure, and that their symmetry
should be used for the grouping of sites.
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
def __init__(self, algo=ALGO_FAST, symmetrized_structures=False):
self.algo = algo
self._all_structures = []
self.symmetrized_structures = symmetrized_structures
def apply_transformation(self, structure, return_ranked_list=False):
"""
For this transformation, the apply_transformation method will return
only the ordered structure with the lowest Ewald energy, to be
consistent with the method signature of the other transformations.
However, all structures are stored in the all_structures attribute in
the transformation object for easy access.
Args:
structure: Oxidation state decorated disordered structure to order
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
num_to_return = max(1, num_to_return)
equivalent_sites = []
exemplars = []
# generate list of equivalent sites to order
# equivalency is determined by sp_and_occu and symmetry
# if symmetrized structure is true
for i, site in enumerate(structure):
if site.is_ordered:
continue
for j, ex in enumerate(exemplars):
sp = ex.species_and_occu
if not site.species_and_occu.almost_equals(sp):
continue
if self.symmetrized_structures:
sym_equiv = structure.find_equivalent_sites(ex)
sym_test = site in sym_equiv
else:
sym_test = True
if sym_test:
equivalent_sites[j].append(i)
break
else:
equivalent_sites.append([i])
exemplars.append(site)
# generate the list of manipulations and input structure
s = Structure.from_sites(structure)
m_list = []
for g in equivalent_sites:
total_occupancy = sum([structure[i].species_and_occu for i in g],
Composition())
total_occupancy = dict(total_occupancy.items())
# round total occupancy to possible values
for k, v in total_occupancy.items():
if abs(v - round(v)) > 0.25:
raise ValueError("Occupancy fractions not consistent "
"with size of unit cell")
total_occupancy[k] = int(round(v))
# start with an ordered structure
initial_sp = max(total_occupancy.keys(),
key=lambda x: abs(x.oxi_state))
for i in g:
s[i] = initial_sp
# determine the manipulations
for k, v in total_occupancy.items():
if k == initial_sp:
continue
m = [k.oxi_state / initial_sp.oxi_state if initial_sp.oxi_state
else 0, v, list(g), k]
m_list.append(m)
# determine the number of empty sites
empty = len(g) - sum(total_occupancy.values())
if empty > 0.5:
m_list.append([0, empty, list(g), None])
matrix = EwaldSummation(s).total_energy_matrix
ewald_m = EwaldMinimizer(matrix, m_list, num_to_return, self.algo)
self._all_structures = []
lowest_energy = ewald_m.output_lists[0][0]
num_atoms = sum(structure.composition.values())
for output in ewald_m.output_lists:
s_copy = s.copy()
# do deletions afterwards because they screw up the indices of the
# structure
del_indices = []
for manipulation in output[1]:
if manipulation[1] is None:
del_indices.append(manipulation[0])
else:
s_copy[manipulation[0]] = manipulation[1]
s_copy.remove_sites(del_indices)
self._all_structures.append(
{"energy": output[0],
"energy_above_minimum":
(output[0] - lowest_energy) / num_atoms,
"structure": s_copy.get_sorted_structure()})
if return_ranked_list:
return self._all_structures
else:
return self._all_structures[0]["structure"]
def __str__(self):
return "Order disordered structure transformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@property
def lowest_energy_structure(self):
return self._all_structures[0]["structure"]
class PrimitiveCellTransformation(AbstractTransformation):
"""
This class finds the primitive cell of the input structure.
It returns a structure that is not necessarily orthogonalized
Author: Will Richards
Args:
tolerance (float): Tolerance for each coordinate of a particular
site. For example, [0.5, 0, 0.5] in cartesian coordinates will be
considered to be on the same coordinates as [0, 0, 0] for a
tolerance of 0.5. Defaults to 0.5.
"""
def __init__(self, tolerance=0.5):
self.tolerance = tolerance
def apply_transformation(self, structure):
"""
Returns most primitive cell for structure.
Args:
structure: A structure
Returns:
The most primitive structure found. The returned structure is
guaranteed to have len(new structure) <= len(structure).
"""
return structure.get_primitive_structure(tolerance=self.tolerance)
def __str__(self):
return "Primitive cell transformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class PerturbStructureTransformation(AbstractTransformation):
"""
This transformation perturbs a structure by a specified distance in random
directions. Used for breaking symmetries.
Args:
amplitude (float): Amplitude of perturbation in angstroms. All sites
will be perturbed by exactly that amplitude in a random direction.
"""
def __init__(self, amplitude=0.01):
self.amplitude = amplitude
def apply_transformation(self, structure):
s = structure.copy()
s.perturb(self.amplitude)
return s
def __str__(self):
return "PerturbStructureTransformation : " + \
"Amplitude = {}".format(self.amplitude)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class DeformStructureTransformation(AbstractTransformation):
"""
This transformation deforms a structure by a deformation gradient matrix
Args:
deformation (array): deformation gradient for the transformation
"""
def __init__(self, deformation):
self.deformation = Deformation(deformation)
def apply_transformation(self, structure):
return self.deformation.apply_to_structure(structure)
def __str__(self):
return "DeformStructureTransformation : " + \
"Deformation = {}".format(str(self.deformation.tolist()))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return DeformStructureTransformation(self.deformation.inv())
@property
def is_one_to_many(self):
return False
class DiscretizeOccupanciesTransformation(AbstractTransformation):
"""
Discretizes the site occupancies in a disordered structure; useful for
grouping similar structures or as a pre-processing step for order-disorder
transformations.
Args:
max_denominator:
An integer maximum denominator for discretization. A higher
denominator allows for finer resolution in the site occupancies.
tol:
A float that sets the maximum difference between the original and
discretized occupancies before throwing an error. The maximum
allowed difference is calculated as 1/max_denominator * 0.5 * tol.
A tol of 1.0 indicates to try to accept all discretizations.
"""
def __init__(self, max_denominator=5, tol=0.25):
self.max_denominator = max_denominator
self.tol = tol
def apply_transformation(self, structure):
"""
Discretizes the site occupancies in the structure.
Args:
structure: disordered Structure to discretize occupancies
Returns:
A new disordered Structure with occupancies discretized
"""
if structure.is_ordered:
return structure
species = [dict(sp) for sp in structure.species_and_occu]
for sp in species:
for k, v in sp.items():
old_occ = sp[k]
new_occ = float(
Fraction(old_occ).limit_denominator(self.max_denominator))
if round(abs(old_occ - new_occ), 6) > (
1 / self.max_denominator / 2) * self.tol:
raise RuntimeError(
"Cannot discretize structure within tolerance!")
sp[k] = new_occ
return Structure(structure.lattice, species, structure.frac_coords)
def __str__(self):
return "DiscretizeOccupanciesTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
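# Usage sketch (the disordered Fe/Ni cell is an assumed example): occupancies
# such as 0.33/0.67 are snapped to the nearest fractions with denominator <= 8
# (here 1/3 and 2/3), provided the change stays within the stated tolerance.
def _example_discretize_usage():
    from pymatgen.core.lattice import Lattice
    structure = Structure(Lattice.cubic(3.0), [{"Fe": 0.33, "Ni": 0.67}],
                          [[0, 0, 0]])
    trans = DiscretizeOccupanciesTransformation(max_denominator=8, tol=0.25)
    return trans.apply_transformation(structure)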
|
|
#!/usr/bin/env python
################################################################################
#
# proxy_logger.py
#
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
"""
****************************************************************************
Create proxy for logging for use with multiprocessing
****************************************************************************
These can be safely sent (marshalled) across process boundaries
===========
Example 1
===========
Set up logger from config file::
from proxy_logger import *
args={}
args["config_file"] = "/my/config/file"
(logger_proxy,
logging_mutex) = make_shared_logger_and_proxy (setup_std_shared_logger,
"my_logger", args)
===========
Example 2
===========
Log to file ``"/my/lg.log"`` in the specified format (Time / Log name / Event type / Message).
Delay file creation until first log.
Only log ``Debug`` messages
Other alternatives for the logging threshold (``args["level"]``) include
* ``logging.DEBUG``
* ``logging.INFO``
* ``logging.WARNING``
* ``logging.ERROR``
* ``logging.CRITICAL``
::
from proxy_logger import *
args={}
args["file_name"] = "/my/lg.log"
args["formatter"] = "%(asctime)s - %(name)s - %(levelname)6s - %(message)s"
args["delay"] = True
args["level"] = logging.DEBUG
(logger_proxy,
logging_mutex) = make_shared_logger_and_proxy (setup_std_shared_logger,
"my_logger", args)
===========
Example 3
===========
Rotate log files every 20 Kb, with up to 10 backups.
::
from proxy_logger import *
args={}
args["file_name"] = "/my/lg.log"
args["rotating"] = True
args["maxBytes"]=20000
args["backupCount"]=10
(logger_proxy,
logging_mutex) = make_shared_logger_and_proxy (setup_std_shared_logger,
"my_logger", args)
==============
To use:
==============
::
(logger_proxy,
logging_mutex) = make_shared_logger_and_proxy (setup_std_shared_logger,
"my_logger", args)
with logging_mutex:
my_log.debug('This is a debug message')
my_log.info('This is an info message')
my_log.warning('This is a warning message')
my_log.error('This is an error message')
my_log.critical('This is a critical error message')
my_log.log(logging.DEBUG, 'This is a debug message')
Note that the logging function ``exception()`` is not included because python
stack trace information is not well-marshalled
(`pickle <http://docs.python.org/library/pickle.html>`_\ d) across processes.
"""
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import sys,os
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Shared logging
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import multiprocessing
import multiprocessing.managers
import logging
import logging.config
import logging.handlers
#
# setup_logger
#
def setup_std_shared_logger(logger_name, args):
"""
    This function is a simple wrapper around the python
`logging <http://docs.python.org/library/logging.html>`_ module.
This *logger_factory* example creates logging objects which can
then be managed by proxy via ``ruffus.proxy_logger.make_shared_logger_and_proxy()``
This can be:
* a `disk log file <http://docs.python.org/library/logging.html#filehandler>`_
        * an automatically backed-up `(rotating) log <http://docs.python.org/library/logging.html#rotatingfilehandler>`_.
* any log specified in a `configuration file <http://docs.python.org/library/logging.html#configuration-file-format>`_
These are specified in the ``args`` dictionary forwarded by ``make_shared_logger_and_proxy()``
:param logger_name: name of log
:param args: a dictionary of parameters forwarded from ``make_shared_logger_and_proxy()``
Valid entries include:
.. describe:: "level"
Sets the `threshold <http://docs.python.org/library/logging.html#logging.Handler.setLevel>`_ for the logger.
.. describe:: "config_file"
The logging object is configured from this `configuration file <http://docs.python.org/library/logging.html#configuration-file-format>`_.
.. describe:: "file_name"
Sets disk log file name.
.. describe:: "rotating"
Chooses a `(rotating) log <http://docs.python.org/library/logging.html#rotatingfilehandler>`_.
.. describe:: "maxBytes"
Allows the file to rollover at a predetermined size
.. describe:: "backupCount"
If backupCount is non-zero, the system will save old log files by appending the extensions ``.1``, ``.2``, ``.3`` etc., to the filename.
.. describe:: "delay"
Defer file creation until the log is written to.
.. describe:: "formatter"
`Converts <http://docs.python.org/library/logging.html#formatter-objects>`_ the message to a logged entry string.
For example,
::
"%(asctime)s - %(name)s - %(levelname)6s - %(message)s"
"""
#
# Log file name with logger level
#
new_logger = logging.getLogger(logger_name)
if "level" in args:
new_logger.setLevel(args["level"])
if "config_file" in args:
logging.config.fileConfig(args["config_file"])
else:
if "file_name" not in args:
raise Exception("Missing file name for log. Remember to set 'file_name'")
log_file_name = args["file_name"]
if "rotating" in args:
rotating_args = {}
# override default
rotating_args["maxBytes"]=args.get("maxBytes", 100000)
rotating_args["backupCount"]=args.get("backupCount", 5)
handler = logging.handlers.RotatingFileHandler( log_file_name, **rotating_args)
else:
            defer_logging = "delay" in args
            handler = logging.handlers.RotatingFileHandler( log_file_name, delay=defer_logging)
# %(name)s
# %(levelno)s
# %(levelname)s
# %(pathname)s
# %(filename)s
# %(module)s
# %(funcName)s
# %(lineno)d
# %(created)f
# %(relativeCreated)d
# %(asctime)s
# %(msecs)d
# %(thread)d
# %(threadName)s
# %(process)d
# %(message)s
#
# E.g.: "%(asctime)s - %(name)s - %(levelname)6s - %(message)s"
#
if "formatter" in args:
my_formatter = logging.Formatter(args["formatter"])
handler.setFormatter(my_formatter)
new_logger.addHandler(handler)
#
# This log object will be wrapped in proxy
#
return new_logger
#
# Proxy object for logging
# Logging messages will be marshalled (forwarded) to the process where the
# shared log lives
#
class LoggerProxy(multiprocessing.managers.BaseProxy):
def debug(self, *args, **kwargs):
return self._callmethod('debug', args, kwargs)
def log(self, *args, **kwargs):
return self._callmethod('log', args, kwargs)
def info(self, *args, **kwargs):
return self._callmethod('info', args, kwargs)
def warning(self, *args, **kwargs):
return self._callmethod('warning', args, kwargs)
def error(self, *args, **kwargs):
return self._callmethod('error', args, kwargs)
def critical(self, *args, **kwargs):
return self._callmethod('critical', args, kwargs)
    def __str__(self):
        return "<LoggerProxy>"
def __repr__ (self):
return 'LoggerProxy()'
#
# Register the setup_logger function as a proxy for setup_logger
#
# We use SyncManager as a base class so we can get a lock proxy for synchronising
# logging later on
#
class LoggingManager(multiprocessing.managers.SyncManager):
"""
Logging manager sets up its own process and will create the real Log object there
We refer to this (real) log via proxies
"""
pass
def make_shared_logger_and_proxy (logger_factory, logger_name, args):
"""
Make a `logging <http://docs.python.org/library/logging.html>`_ object
called "\ ``logger_name``\ " by calling ``logger_factory``\ (``args``\ )
This function will return a proxy to the shared logger which can be copied to jobs
in other processes, as well as a mutex which can be used to prevent simultaneous logging
from happening.
:param logger_factory: functions which creates and returns an object with the
`logging <http://docs.python.org/library/logging.html>`_ interface.
``setup_std_shared_logger()`` is one example of a logger factory.
:param logger_name: name of log
:param args: parameters passed (as a single argument) to ``logger_factory``
:returns: a proxy to the shared logger which can be copied to jobs in other processes
:returns: a mutex which can be used to prevent simultaneous logging from happening
"""
#
# make shared log and proxy
#
manager = LoggingManager()
manager.register( 'setup_logger',
logger_factory,
proxytype=LoggerProxy,
exposed = ( 'critical', 'log',
'info', 'debug', 'warning', 'error'))
manager.start()
logger_proxy = manager.setup_logger(logger_name, args)
#
# make sure we are not logging at the same time in different processes
#
logging_mutex = manager.Lock()
return logger_proxy, logging_mutex
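#
#   Minimal usage sketch (assumed, not part of the original module): the proxy
#   and mutex returned by make_shared_logger_and_proxy() can be passed to jobs
#   running in other processes; each job takes the mutex before logging so that
#   messages from concurrent processes do not interleave.
#
def _example_job(logger_proxy, logging_mutex, job_name):
    with logging_mutex:
        logger_proxy.info("job %s completed" % job_name)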
import unittest, os,sys
from .proxy_logger import *
import traceback
class Test_Logging(unittest.TestCase):
def test_rotating_log(self):
"""
test rotating via proxy
"""
open("/tmp/lg.log", "w").close()
args={}
args["file_name"] = "/tmp/lg.log"
args["rotating"] = True
args["maxBytes"]=20000
args["backupCount"]=10
#args["level"]= logging.INFO
(my_log,
logging_mutex) = make_shared_logger_and_proxy (setup_std_shared_logger,
"my_logger", args)
with logging_mutex:
my_log.debug('This is a debug message')
my_log.info('This is an info message')
my_log.warning('This is a warning message')
my_log.error('This is an error message')
my_log.critical('This is a critical error message')
my_log.log(logging.ERROR, 'This is a debug message')
        self.assertEqual(open("/tmp/lg.log").read(),
"""This is a warning message
This is an error message
This is a critical error message
This is a debug message
""")
#
# debug code not run if called as a module
#
if __name__ == '__main__':
if sys.argv.count("--debug"):
sys.argv.remove("--debug")
unittest.main()
|
|
#ipython --pylab
import scipy
import glob
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
viridis = ListedColormap(_viridis_data, name='viridis')
plt.register_cmap(name='viridis', cmap=viridis)
plt.set_cmap(viridis)
Files = glob.glob('Results/P50Depth_ABC/*.nc')
File2 = glob.glob('Results/60umolkg_Depth/*.nc')
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(7.66,6))
plt.subplots_adjust(bottom=0.2)
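# Four map panels share one figure: three P50-depth fields (low/high P50 with
# negative/positive Delta-H) from Results/P50Depth_ABC plus the 60 umol/kg
# oxygen depth field, with a single horizontal colorbar added at the bottom.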
file = Files[0]
nc = Dataset(file,'r')
lats = nc.variables['LATITUDE'][:]
lons = nc.variables['LONGITUDE'][:]
depth = nc.variables['P50DEPTH'][:]
depth = depth.squeeze()
fig1 = plt.subplot(2, 2, 1)
m = Basemap(llcrnrlat=-80.,urcrnrlat=80.,projection='cyl',lon_0=200)
depth_cyclic, lons_cyclic = addcyclic(depth[:,:], lons)
#depth_cyclic, lons_cyclic = shiftgrid(20., depth_cyclic, lons_cyclic, start=True)
x, y = m(*np.meshgrid(lons_cyclic, lats))
m.drawmapboundary() #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,0])
im1 = m.pcolor(x,y,depth, vmin=0, vmax=1100)
#im2 = m.pcolor(a,b,depth,shading='flat',cmap=plt.cm.jet_r, vmin=0, vmax=1100)
fig1.set_title(r'Low P50 and $-\Delta$H')
file = Files[1]
nc = Dataset(file,'r')
lats = nc.variables['LATITUDE'][:]
lons = nc.variables['LONGITUDE'][:]
depth = nc.variables['P50DEPTH'][:]
depth = depth.squeeze()
fig2 = plt.subplot(2, 2, 2)
m = Basemap(llcrnrlat=-80.,urcrnrlat=80.,projection='cyl',lon_0=200)
depth_cyclic, lons_cyclic = addcyclic(depth[:,:], lons)
depth_cyclic, lons_cyclic = shiftgrid(20., depth_cyclic, lons_cyclic, start=True)
x, y = m(*np.meshgrid(lons_cyclic, lats))
m.drawmapboundary() #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
m.drawparallels(np.arange(-90.,120.,30.),labels=[0,0,0,0])
m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,0])
im1 = m.pcolor(x,y,depth,vmin=0, vmax=1100)
#im2 = m.pcolor(a,b,depth,shading='flat',cmap=plt.cm.jet_r, vmin=0, vmax=1100)
fig2.set_title(r'Low P50 and $+\Delta$H')
file = Files[2]
nc = Dataset(file,'r')
lats = nc.variables['LATITUDE'][:]
lons = nc.variables['LONGITUDE'][:]
depth = nc.variables['P50DEPTH'][:]
depth = depth.squeeze()
fig3 = plt.subplot(2, 2, 3)
m = Basemap(llcrnrlat=-80.,urcrnrlat=80.,projection='cyl',lon_0=200)
depth_cyclic, lons_cyclic = addcyclic(depth[:,:], lons)
depth_cyclic, lons_cyclic = shiftgrid(20., depth_cyclic, lons_cyclic, start=True)
x, y = m(*np.meshgrid(lons_cyclic, lats))
m.drawmapboundary() #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1])
im1 = m.pcolor(x,y,depth,vmin=0, vmax=1100)
#im2 = m.pcolor(a,b,depth,shading='flat',cmap=plt.cm.jet_r, vmin=0, vmax=1100)
fig3.set_title(r'High P50 and $-\Delta$H')
file = File2[0]
nc = Dataset(file,'r')
lats = nc.variables['LATITUDE'][:]
lons = nc.variables['LONGITUDE'][:]
depth = nc.variables['DEPTH_60UMOLKG'][:]
depth = depth.squeeze()
fig4 = plt.subplot(2, 2, 4)
m = Basemap(llcrnrlat=-80.,urcrnrlat=80.,projection='cyl',lon_0=200)
depth_cyclic, lons_cyclic = addcyclic(depth[:,:], lons)
#depth_cyclic, lons_cyclic = shiftgrid(20., depth_cyclic, lons_cyclic, start=True)
x, y = m(*np.meshgrid(lons_cyclic, lats))
m.drawmapboundary() #fill_color='0.5'
m.drawcoastlines()
m.fillcontinents(color='grey', lake_color='0.5')
m.drawparallels(np.arange(-90.,120.,30.),labels=[0,0,0,0])
m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1])
im1 = m.pcolor(x,y,depth,vmin=0, vmax=1100)
#im2 = m.pcolor(a,b,depth,shading='flat',cmap=plt.cm.jet_r, vmin=0, vmax=1100)
fig4.set_title(r'60 $\mu$mol kg$^{-1}$')
cbar_ax = fig.add_axes([0.2, 0.1, 0.6, 0.03])
fig.colorbar(im1, cax=cbar_ax, orientation="horizontal")
#plt.show()
outfig = 'Graphs/P50Depth_ABC_Maps.ps'
#fig.set_size_inches(7.5,10)
plt.savefig(outfig, dpi=300, bbox_inches=0)
quit()
|
|
#!/usr/bin/env python3
import os
import sys
import string
from importlib import reload
import getKerningPairsFromOTF
reload(getKerningPairsFromOTF)
__doc__ = '''\
This script extracts a viable kern feature file from a compiled OTF.
It requires the script 'getKerningPairsFromOTF.py', which is distributed
in the same folder.
usage:
python dumpKernFeatureFromOTF.py font.otf > outputfile
'''
kKernFeatureTag = 'kern'
compressSinglePairs = True
# Switch to control if single pairs shall be written plainly, or in a more
# space-saving notation (using enum pos).
def sortGlyphs(glyphlist):
# Sort glyphs in a way that glyphs from the exceptionList, or glyphs
# starting with 'uni' names do not get to be key (first) glyphs.
# An infinite loop is avoided, in case there are only glyphs matching
# above mentioned properties.
exceptionList = 'dotlessi dotlessj kgreenlandic ae oe AE OE uhorn'.split()
glyphs = sorted(glyphlist)
for i in range(len(glyphs)):
if glyphs[0] in exceptionList or glyphs[0].startswith('uni'):
glyphs.insert(len(glyphs), glyphs.pop(0))
else:
continue
return glyphs
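# Quick self-check of the ordering rule above (glyph names chosen just for the
# example): exception-list glyphs are demoted from the key (first) position
# whenever a plain glyph is available; the bounded loop keeps all-exception
# lists from cycling forever.
assert sortGlyphs(['ae', 'b']) == ['b', 'ae']
assert sortGlyphs(['a', 'ae', 'b']) == ['a', 'ae', 'b']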
def nameClass(glyphlist, flag):
glyphs = sortGlyphs(glyphlist)
if len(glyphs) == 0:
name = 'error!!!'
else:
name = glyphs[0]
if name in string.ascii_lowercase:
case = '_LC'
elif name in string.ascii_uppercase:
case = '_UC'
else:
case = ''
return '@%s%s%s' % (name, flag, case)
def buildOutputList(sourceList, outputList, headlineString):
if len(sourceList):
headline = headlineString
decoration = '-' * len(headline)
outputList.append('# ' + headline)
outputList.append('# ' + decoration)
for item in sourceList:
outputList.append(item)
outputList.append('')
def makeKernFeature(fontPath):
f = getKerningPairsFromOTF.OTFKernReader(fontPath)
allClasses = {}
classList = []
output = []
for kerningClass in f.allLeftClasses:
glyphs = sortGlyphs(f.allLeftClasses[kerningClass])
className = nameClass(glyphs, '_LEFT')
allClasses.setdefault(className, glyphs)
for kerningClass in f.allRightClasses:
glyphs = sortGlyphs(f.allRightClasses[kerningClass])
className = nameClass(glyphs, '_RIGHT')
allClasses.setdefault(className, glyphs)
singlePairsList = sorted(f.singlePairs.items())
classPairsList = []
for (leftClass, rightClass), value in sorted(f.classPairs.items()):
leftGlyphs = sortGlyphs(f.allLeftClasses[leftClass])
leftClassName = nameClass(leftGlyphs, '_LEFT')
rightGlyphs = sortGlyphs(f.allRightClasses[rightClass])
rightClassName = nameClass(rightGlyphs, '_RIGHT')
classPairsList.append(((leftClassName, rightClassName), value))
for className in sorted(allClasses):
glyphs = allClasses[className]
classList.append('%s = [ %s ];' % (className, ' '.join(glyphs)))
buildOutputList(
[], output, 'kern feature dumped from %s' % os.path.basename(fontPath))
buildOutputList(
classList, output, 'kerning classes')
if compressSinglePairs:
leftGlyphsDict = {}
rightGlyphsDict = {}
compressedLeft = []
compressedBoth = []
class_glyph = []
glyph_class = []
glyph_glyph = []
exploding_class_class = []
# Compress the single pairs to a more space-saving notation.
# First, dictionaries for each left glyph are created.
# If the kerning value to any right glyph happens to be equal,
# those right glyphs are merged into a 'class'.
for (left, right), value in singlePairsList:
leftGlyph = left
leftGlyphsDict.setdefault(leftGlyph, {})
kernValueDict = leftGlyphsDict[leftGlyph]
kernValueDict.setdefault(value, []).append(right)
for left in leftGlyphsDict:
for value in leftGlyphsDict[left]:
right = leftGlyphsDict[left][value]
right = sortGlyphs(right)
compressedLeft.append((left, right, value))
# Same happens for the right side; including classes that
# have been compressed before.
for left, right, value in compressedLeft:
rightGlyph = ' '.join(right)
rightGlyphsDict.setdefault(rightGlyph, {})
kernValueDict = rightGlyphsDict[rightGlyph]
kernValueDict.setdefault(value, []).append(left)
for right in rightGlyphsDict:
for value in rightGlyphsDict[right]:
left = rightGlyphsDict[right][value]
left = sortGlyphs(left)
compressedBoth.append((left, right.split(), value))
# Splitting the compressed single-pair kerning into four different
# lists; organized by type:
for left, right, value in compressedBoth:
if len(left) != 1 and len(right) != 1:
exploding_class_class.append(
'enum pos [ %s ] [ %s ] %s;' % (' '.join(left), ' '.join(right), value))
elif len(left) != 1 and len(right) == 1:
class_glyph.append(
'enum pos [ %s ] %s %s;' % (' '.join(left), ' '.join(right), value))
elif len(left) == 1 and len(right) != 1:
glyph_class.append(
'enum pos %s [ %s ] %s;' % (' '.join(left), ' '.join(right), value))
elif len(left) == 1 and len(right) == 1:
glyph_glyph.append(
'pos %s %s %s;' % (' '.join(left), ' '.join(right), value))
else:
                print('ERROR with (%s, %s, %s)' % (left, right, value))
# Making sure all the pairs made it through the process:
if len(compressedBoth) != len(class_glyph) + len(glyph_class) + len(glyph_glyph) + len(exploding_class_class):
print('ERROR - we lost some kerning pairs.')
buildOutputList(glyph_glyph, output, 'glyph to glyph')
buildOutputList(glyph_class, output, 'glyph to class')
buildOutputList(class_glyph, output, 'class to glyph')
buildOutputList(exploding_class_class, output, 'exploding class to exploding class')
else:
# Plain list of single pairs
glyph_glyph = []
for (left, right), value in singlePairsList:
glyph_glyph.append('pos %s %s %s;' % (left, right, value))
buildOutputList(glyph_glyph, output, 'glyph to glyph')
# List of class-to-class pairs
class_class = []
for (left, right), value in classPairsList:
class_class.append('pos %s %s %s;' % (left, right, value))
buildOutputList(class_class, output, 'class to class')
print('\n'.join(output))
def main():
if len(sys.argv) == 2:
assumedFontPath = sys.argv[1]
if (
os.path.exists(assumedFontPath) and
os.path.splitext(assumedFontPath)[1].lower() in ['.otf', '.ttf']
):
fontPath = sys.argv[1]
makeKernFeature(fontPath)
else:
print("No valid font provided.")
else:
print("No valid font provided.")
if __name__ == "__main__":
main()
|
|
# -*- coding: cp936 -*-
"""
The main purpose of this script is to generate statistics for single-end
(stranded) reads, mainly CLIP-Seq reads, with respect to the transcript END.
=============================
Usage: python SE_RNASeq_diff_TailStat_obj_v0.1.alpha.py
-h help
-i files to processed *[No default value]
-b bed_infile to process *[No default value]
-R full reference length *[No default value]
dict format
-r GTF for transcription END file *[No default value]
(for differential END site)
-o prefix of output files [default:"output"]
-s suffix for the files to be processed [default value "txt"]
-S output SUFFIX [default: "txt"]
-d sep_char within among columns [default value '\t']
-j skip header lines [default value 1]
if skip header lines = 0 indicates that there is no header line
if skip header lines = n indicates that first n lines are header lines
-I input file path [default: current folder]
-O output file path [default: current folder]
-L unique_id length for the infile [default value 2]
===================
input description:
input files:
1. *Tail.txt AND BED file Corresponding
(Output Txt from SAM2SE_Tail_length_obj_v0.2.alpha.py)
2. GTF file labeling the transcription END for each reference (-r)
differential END Site
format: first column reference gene/ID
second column: the correct coordinate /range
third column: the incorrect coordinate /range
fourth column: the name of the gene/transcripts
3. Dict file (-R)
======================
output files:
<1> stat file
<2> Tail length stat
1. all count, the distribution of tail length (total reads), total count
2. the distribution of A,T,C,G 4 nucleotide for all tails
3. the read count of Atail (>90 percent A), percentage
4. the read count of Utail (>90 percent T), percentage
5. UA tail (>90 percent AU)
6. No tail count
<3> per gene stat file
<4> Per gene tail length stat
============================
Python & Module requirement:
Versions 2.x : 2.4 or above
Module: No additional Python Module is required.
============================
Library file requirement:
Not a standalone version; a few library files are required.
============================
External Tools requirement:
============================
command line example:
============================
versions update
v0.0:
"""
##Copyright
##By Liye Zhang
##Contact: bioliyezhang@gmail.com
##Compatible Python Version:2.4 or above
###Code Framework
### Specific function definitions
def extract_info2coor_list(tail_info):
region_count=tail_info.count(",")
final_region_list = []
if region_count>=1:
region_list = tail_info.split(",")
for region in region_list:
if region.count("-")==1:
start = int(region.split("-")[0])
end = int(region.split("-")[1])
for index in range(start,end+1):
final_region_list.append(index)
else:
final_region_list.append(int(region))
else:
if tail_info.count("-")==1:
start = int(tail_info.split("-")[0])
end = int(tail_info.split("-")[1])
for index in range(start,end+1):
final_region_list.append(index)
else:
final_region_list.append(int(tail_info))
return final_region_list
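## Quick illustration of the coordinate syntax handled above (values made up):
## comma-separated entries may be single positions or "start-end" ranges, and
## every covered position is returned.
assert extract_info2coor_list("3-5,9") == [3, 4, 5, 9]
assert extract_info2coor_list("7") == [7]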
def list2fastq(data_list,prefix,infile_obj):
outfile_name=infile_obj.outputfilename_gen(prefix,"fq") ##create output file
outfile_path=OUTPUT_PATH+"/"+outfile_name
outfile_obj=GeneralFile_class(outfile_path)
outfile_obj.output_handle_gen()
for item in data_list:
ID=item[0]
seq=item[1]
outfile_obj.handle.write("@"+ID+'\n')
outfile_obj.handle.write(seq+"\n")
outfile_obj.handle.write("+\n")
outfile_obj.handle.write("P"*len(seq)+'\n')
outfile_obj.handle.close()
def specific_function(infiles,bed_infiles,reference,full_reference):
##Section I: Generate the gene annotation dictionary
cmd_records=record_command_line() ##record the command line
## Module 1: Process the full reference (dict format)
infile_obj=GTF_File_class(full_reference)
infile_obj.SKIP_HEADER=1 ##unique ID length
infile_reader=infile_obj.reader_gen()
filtered_f_range_dict = dict() ## record the region needs to care
filtered_r_range_dict = dict()
ref_chro_list = [] ## reference list
length_dict = dict() ## reference length dict
for row in infile_reader:
ref_ID = row[1][3:]
full_length = int(row[2][3:])
filtered_f_range_dict[ref_ID]=[0]*full_length ## f
filtered_r_range_dict[ref_ID]=[0]*full_length ## r
length_dict[ref_ID] = full_length
## Module 2: Process the gene Diff End information (not canonical format)
## Need to adjust this..!
infile_obj=GeneralFile_class(reference) ##create file obj(class)
infile_obj.SAMPLE_ID_LEN=unique_id_length ##unique ID length
infile_reader=infile_obj.reader_gen()
ref_gene_list = []
for row in infile_reader:
ref_ID = row[1]
if ref_ID not in ref_chro_list:
ref_chro_list.append(ref_ID)
gene = row[0]
if gene not in ref_gene_list:
ref_gene_list.append(gene)
strand= row[4]
correct_end_coor_info = row[2]
correct_end_coor_list = extract_info2coor_list(correct_end_coor_info)
cryptic_end_coor_info = row[3]
cryptic_end_coor_list = extract_info2coor_list(cryptic_end_coor_info)
for index in correct_end_coor_list:
if strand=="+":
filtered_f_range_dict[ref_ID][index-1]=gene+"+"
else:
filtered_r_range_dict[ref_ID][index-1]=gene+"+"
for index in cryptic_end_coor_list:
if strand=="+":
filtered_f_range_dict[ref_ID][index-1]=gene+"-"
else:
filtered_r_range_dict[ref_ID][index-1]=gene+"-"
nt_list = ["A","T","G","C"]
## Module 3: Pair input file and BED file first
if len(infiles)!=len(bed_infiles):
print "Inconsistent Pairs,Please check output"
sys.exit(0)
else:
paired_list = generate_paired_files_by_ID(infiles+bed_infiles,2,"txt","bed")
## return first file is txt , second file is bed
## Module 4: Data Processing
for input_list in paired_list:
infile = input_list[0]
bed_infile = input_list[1]
        ## Module 4.1 : initialize the dicts that store the data
print "Processing infile:", infile
stat_dict_canonical = dict()
stat_dict_cryptic = dict()
len_dist_canonical_dict = dict()
len_dist_cryptic_dict = dict()
tail_dict_canonical = dict()
tail_dict_cryptic = dict()
for gene in ref_gene_list:
stat_dict_canonical[gene]=[0.0]*11 ## similar recording for each gene
stat_dict_cryptic[gene]=[0.0]*11 ## similar recording for each gene
len_dist_canonical_dict[gene] = []
len_dist_cryptic_dict[gene] = []
tail_dict_canonical[gene] = []
tail_dict_cryptic[gene] = []
#stat_dict[gene] = [0.0]*11
## each reference:
## within range count (1,index0)
## a Tail count (2,index1)
## u Tail count (3,index2) ;
## Au count (4,index3) ;
## unknown (5,index4)
## nt (A,T,C,G) count 6-9 ; all atcg 10
## no-tail count 11
all_tail_count = 0
within_range_tail_count = 0 ## cryptic or canonical
a_tail_count = 0
u_tail_count = 0
au_tail_count = 0
all_au_tail_count = 0
tail_length_list = []
nt_dict = dict()
for nt in nt_list:
nt_dict[nt]=0
## Module 4.2: include the notail bed file processing
infile_obj=BEDFile_class(bed_infile) ##create file obj(class)
infile_reader=infile_obj.reader_gen() ##create the file reader to process infile
for row in infile_reader:
chro=row[infile_obj.CHRO_COLUMN]
coor=int(row[infile_obj.START_COLUMN])
strand=row[infile_obj.STRAND_COLUMN]
targeted_region=False
if strand=="+" and filtered_f_range_dict[chro][coor]!=0:
targeted_region=True
elif strand=="-" and filtered_r_range_dict[chro][coor]!=0:
targeted_region=True
if targeted_region:
if strand=="+":
tail_type=filtered_f_range_dict[chro][coor][-1]
gene = filtered_f_range_dict[chro][coor][:-1]
else:
tail_type=filtered_r_range_dict[chro][coor][-1]
gene = filtered_r_range_dict[chro][coor][:-1]
readcount = int(row[infile_obj.SCORE_COLUMN])
within_range_tail_count+=readcount
if tail_type=="+":
stat_dict_canonical[gene][10]+=readcount ## notail count
stat_dict_canonical[gene][0]+=readcount
len_dist_canonical_dict[gene]=len_dist_canonical_dict[gene]+[0]*readcount
else:
stat_dict_cryptic[gene][10]+=readcount
stat_dict_cryptic[gene][0]+=readcount
len_dist_cryptic_dict[gene]=len_dist_cryptic_dict[gene]+[0]*readcount
            ## TODO: when within the targeted region, add/update the stats accordingly
## Module 4.3 : Process the Tail input file
infile_obj=GeneralFile_class(infile) ##create file obj(class)
infile_obj.SKIP_HEADER=infile_skip ##setup up the manual skip header if necessary
infile_obj.SAMPLE_ID_LEN=unique_id_length ##unique ID length
infile_reader=infile_obj.reader_gen() ##create the file reader to process infile
for row in infile_reader:
ref_ID = row[6]
if ref_ID.startswith("Tb"):
            if not ref_ID.endswith("100nt"):
ref_ID=ref_ID+"_100nt"
all_tail_count+=1
strand = row[9]
#print "strand",strand
tail_seq = row[2]
coor_end = int(row[8])
tail_length = int(row[3])
#final_coor = coor_end-tail_length
if coor_end>length_dict[ref_ID]:
coor_end= length_dict[ref_ID]
targeted_region=False
if strand=="+" and filtered_f_range_dict[ref_ID][coor_end-1]!=0:
targeted_region=True
elif strand=="-" and filtered_r_range_dict[ref_ID][coor_end-1]!=0:
targeted_region=True
else:
pass
if targeted_region:
#print "come here"
if strand=="+":
tail_type=filtered_f_range_dict[ref_ID][coor_end-1][-1]
gene = filtered_f_range_dict[ref_ID][coor_end-1][:-1]
else:
tail_type=filtered_r_range_dict[ref_ID][coor_end-1][-1]
gene = filtered_r_range_dict[ref_ID][coor_end-1][:-1]
within_range_tail_count+=1
tail_ID = row[0]
if tail_type=="+":
stat_dict_canonical[gene][0]+=1
len_dist_canonical_dict[gene].append(tail_length)
tail_dict_canonical[gene].append([tail_ID,tail_seq])
else:
stat_dict_cryptic[gene][0]+=1
len_dist_cryptic_dict[gene].append(tail_length)
tail_dict_cryptic[gene].append([tail_ID,tail_seq])
tail_length_list.append(tail_length)
for nt in tail_seq:
nt_dict[nt]+=1
if tail_type=="+":
stat_dict_canonical[gene][9]+=1
else:
stat_dict_cryptic[gene][9]+=1
if nt=="A" or nt=="a":
if tail_type=="+":
stat_dict_canonical[gene][5]+=1
else:
stat_dict_cryptic[gene][5]+=1
elif nt=="T" or nt=="t":
if tail_type=="+":
stat_dict_canonical[gene][6]+=1
else:
stat_dict_cryptic[gene][6]+=1
elif nt=="C" or nt=="c":
if tail_type=="+":
stat_dict_canonical[gene][7]+=1
else:
stat_dict_cryptic[gene][7]+=1
elif nt=="G" or nt=="g":
if tail_type=="+":
stat_dict_canonical[gene][8]+=1
else:
stat_dict_cryptic[gene][8]+=1
elif nt=="N" or nt=="n":
pass
else:
print "unrecognized nt",nt
sys.exit(0)
## check polyA tail
tail_definition=row[-2]
if tail_definition=="Atail":
if tail_type == "+":
stat_dict_canonical[gene][1]+=1
else:
stat_dict_cryptic[gene][1]+=1
elif tail_definition=="Utail":
if tail_type == "+":
stat_dict_canonical[gene][2]+=1
else:
stat_dict_cryptic[gene][2]+=1
elif tail_definition=="AUtail":
if tail_type == "+":
stat_dict_canonical[gene][3]+=1
else:
stat_dict_cryptic[gene][3]+=1
elif tail_definition=="unknown":
if tail_type == "+":
stat_dict_canonical[gene][4]+=1
else:
stat_dict_cryptic[gene][4]+=1
'''
if tail_seq.count("A")>=int(float(tail_length)*0.90):
a_tail_count+=1
# a tail count (index1)
if tail_type=="+":
stat_dict_canonical[gene][1]+=1
else:
stat_dict_cryptic[gene][1]+=1
if tail_seq.count("T")>=int(float(tail_length)*0.90):
u_tail_count+=1
# u tail count (index2)
if tail_type=="+":
stat_dict_canonical[gene][2]+=1
else:
stat_dict_cryptic[gene][2]+=1
if (tail_seq.count("A")+tail_seq.count("T"))>=int(float(tail_length)*0.90):
au_tail_count+=1
# within range au tail count (index4)
if tail_type=="+":
stat_dict_canonical[gene][3]+=1
else:
stat_dict_cryptic[gene][3]+=1
'''
print "nt_dict",nt_dict
##Setup output file
# outfile_name=infile_obj.outputfilename_gen("stats",OUTPUT_SUFFIX) ##create output file
# outfile_path=OUTPUT_PATH+"/"+outfile_name
# outfile_obj=GeneralFile_class(outfile_path) ##create output obj
# outfile_obj.RECORD=cmd_records
# outfile_obj.output_handle_gen() ##generate output handle
# outfile_obj.handle.write("#Item\tCount\n")
# outfile_obj.handle.write("all tail count\t"+str(all_tail_count)+'\n')
# outfile_obj.handle.write("within range tail count\t"+str(within_range_tail_count)+'\n')
# within_range_tail_percent = float(within_range_tail_count)*100.00/float(all_tail_count)
# outfile_obj.handle.write("within range tail percent\t"+str(within_range_tail_percent)+'\n')
# outfile_obj.handle.write("A tail count\t"+str(a_tail_count)+'\n')
# outfile_obj.handle.write("A tail percent within range tail\t")
# a_tail_percent = 100.00 * float(a_tail_count)/float(within_range_tail_count)
# outfile_obj.handle.write(str(a_tail_percent)+'\n')
# outfile_obj.handle.write("U tail count\t"+str(u_tail_count)+'\n')
# u_tail_percent = 100.00 * float(u_tail_count)/float(within_range_tail_count)
# outfile_obj.handle.write(str(u_tail_percent)+'\n')
# outfile_obj.handle.write("AU tail count\t"+str(au_tail_count)+'\n')
# au_tail_percent = 100.00 * float(au_tail_count)/float(within_range_tail_count)
# outfile_obj.handle.write(str(au_tail_percent)+'\n')
# outfile_obj.handle.write("All + strand AU tail count\t"+str(all_au_tail_count)+'\n')
#all_au_tail_percent = 100.00 * float(all_au_tail_count)/float(within_range_tail_count)
#outfile_obj.handle.write(str(all_au_tail_percent)+'\n')
# nt_all_count = float(sum(nt_dict.values()))
#a_percent = 100.00 * float(nt_dict["A"])/nt_all_count
#outfile_obj.handle.write("A %\t"+str(a_percent)+'\n')
#c_percent = 100.00 * float(nt_dict["C"])/nt_all_count
#outfile_obj.handle.write("C %\t"+str(c_percent)+'\n')
#t_percent = 100.00 * float(nt_dict["T"])/nt_all_count
#outfile_obj.handle.write("T %\t"+str(t_percent)+'\n')
#g_percent = 100.00 * float(nt_dict["G"])/nt_all_count
#outfile_obj.handle.write("G %\t"+str(g_percent)+'\n')
# outfile_obj.handle.close()
## tail length distribution
outfile_name=infile_obj.outputfilename_gen("tail_length_dist",OUTPUT_SUFFIX) ##create output file
outfile_path=OUTPUT_PATH+"/"+outfile_name
outfile_obj=GeneralFile_class(outfile_path) ##create output obj
outfile_obj.RECORD=cmd_records
outfile_obj.output_handle_gen() ##generate output handle
max_length= max(tail_length_list)
for index in range(1,max_length+1):
tail_count = tail_length_list.count(index)
outfile_obj.handle.write(str(index)+'\t'+str(tail_count)+'\n')
outfile_obj.handle.close()
## per gene output
outfile_name=infile_obj.outputfilename_gen("per_gene_stat",OUTPUT_SUFFIX) ##create output file
outfile_path=OUTPUT_PATH+"/"+outfile_name
outfile_obj=GeneralFile_class(outfile_path)
outfile_obj.RECORD=cmd_records
outfile_obj.output_handle_gen()
## output header
outfile_obj.handle.write("Description")
for gene in ref_gene_list:
## output header
outfile_obj.handle.write('\t'+gene+"_canonical")
outfile_obj.handle.write('\t'+gene+"_cryptic")
outfile_obj.handle.write('\n')
description_list=["Within_range_tail_count","A tail count "
,"U tail count ","AU tail count","unknown tail count", "(within) A nt %",
"U nt %","C nt %", "G nt %", "N nt","no_tail count"]
for index in range(11):
if index==0:
outfile_obj.handle.write(description_list[index])
for gene in ref_gene_list:
data=str(stat_dict_canonical[gene][index])
outfile_obj.handle.write('\t'+data)
data=str(stat_dict_cryptic[gene][index])
outfile_obj.handle.write('\t'+data)
outfile_obj.handle.write('\n')
if index in [1,2,3,4,10]:
outfile_obj.handle.write(description_list[index])
for gene in ref_gene_list:
data=str(stat_dict_canonical[gene][index])
outfile_obj.handle.write('\t'+data)
data=str(stat_dict_cryptic[gene][index])
outfile_obj.handle.write('\t'+data)
outfile_obj.handle.write('\n')
outfile_obj.handle.write(description_list[index]+"%")
for gene in ref_gene_list:
if stat_dict_canonical[gene][0]==0:
total=1.00
else:
total=stat_dict_canonical[gene][0]
data=str(stat_dict_canonical[gene][index]*100.00/total)
outfile_obj.handle.write('\t'+data)
if stat_dict_cryptic[gene][0]==0:
total=1.00
else:
total=stat_dict_cryptic[gene][0]
data=str(stat_dict_cryptic[gene][index]*100.00/total)
outfile_obj.handle.write('\t'+data)
outfile_obj.handle.write('\n')
if index>=5 and index<=8:
outfile_obj.handle.write(description_list[index])
for gene in ref_gene_list:
if stat_dict_canonical[gene][9]==0:
total=1.00
else:
total=stat_dict_canonical[gene][9]
data=str(stat_dict_canonical[gene][index]*100.00/total)
outfile_obj.handle.write('\t'+data)
if stat_dict_cryptic[gene][9]==0:
total=1.00
else:
total=stat_dict_cryptic[gene][9]
data=str(stat_dict_cryptic[gene][index]*100.00/total)
outfile_obj.handle.write('\t'+data)
outfile_obj.handle.write('\n')
outfile_obj.handle.close()
## tail per gene length distribution
outfile_name=infile_obj.outputfilename_gen("tail_length_dist_per_gene",OUTPUT_SUFFIX) ##create output file
outfile_path=OUTPUT_PATH+"/"+outfile_name
outfile_obj=GeneralFile_class(outfile_path) ##create output obj
outfile_obj.RECORD=cmd_records
outfile_obj.output_handle_gen() ##generate output handle
outfile_obj.handle.write("#length")
for gene in ref_gene_list:
outfile_obj.handle.write('\t'+gene+"_canonical")
outfile_obj.handle.write('\t'+gene+"_cryptic")
outfile_obj.handle.write('\n')
max_length= max(tail_length_list)
for index in range(0,max_length+1):
outfile_obj.handle.write(str(index))
for gene in ref_gene_list:
tail_length_list = len_dist_canonical_dict[gene]
tail_count = tail_length_list.count(index)
outfile_obj.handle.write('\t'+str(tail_count))
tail_length_list = len_dist_cryptic_dict[gene]
tail_count = tail_length_list.count(index)
outfile_obj.handle.write('\t'+str(tail_count))
outfile_obj.handle.write('\n')
outfile_obj.handle.close()
## per gene output (fastq file)
for gene in ref_gene_list:
            ## output cryptic
            prefix=gene+"-cryptic"
if len(tail_dict_cryptic[gene])>0:
list2fastq(tail_dict_cryptic[gene],prefix,infile_obj)
prefix=gene+"-canonical"
if len(tail_dict_canonical[gene])>0:
list2fastq(tail_dict_canonical[gene],prefix,infile_obj)
if __name__ == "__main__":
###Python General Module Import
import sys, csv, getopt, re
import os
import math
from itertools import ifilter
##Liye own common function,class loading
from Constant_Library import *
from General_Library import *
from File_Class import * ###
from Sequencing_Library import *
OUTPUT_SEP_CHAR='\t'
#exit if not enough arguments
if len(sys.argv) < 3:
print __doc__
sys.exit(0)
###set default value
suffix="txt"
infile=None
infile_skip=0
sep_char='\t'
sep_gene=','
header_file=None
unique_id_length=2
parameter_file=None
INPUT_PATH=os.getcwd()
OUTPUT_PATH=os.getcwd()
prefix="output"
OUTPUT_SUFFIX="txt"
reference=None
full_reference=None
UPSTREAM_BORDER = -100
DOWNSTREAM_BORDER = 100
###get arguments(parameters)
optlist, cmd_list = getopt.getopt(sys.argv[1:], 'hi:b:s:S:r:R:u:d:D:j:I:t:p:L:o:O:z',["test="])
for opt in optlist:
if opt[0] == '-h':
print __doc__; sys.exit(0)
elif opt[0] == '-i': infile = opt[1]
elif opt[0] == '-b': bed_infile = opt[1]
elif opt[0] == '-I': INPUT_PATH = opt[1]
elif opt[0] == '-O': OUTPUT_PATH = opt[1]
elif opt[0] == '-S': OUTPUT_SUFFIX = opt[1]
elif opt[0] == '-s': suffix = opt[1]
elif opt[0] == '-d': DOWNSTREAM_BORDER = int(opt[1])
elif opt[0] == '-u': UPSTREAM_BORDER = int(opt[1])
elif opt[0] == '-D': sep_gene =opt[1]
elif opt[0] == '-j': infile_skip= int(opt[1])
elif opt[0] == '-r': reference = opt[1]
elif opt[0] == '-R': full_reference = opt[1]
elif opt[0] == '-o': prefix = opt[1]
elif opt[0] == '-L': unique_id_length = int(opt[1])
elif opt[0] == '--test': long_input = opt[1]
#print "Test long input", long_input
if infile==None:
infiles=CurrentFolder_to_Infiles(INPUT_PATH, suffix)
bed_infiles=CurrentFolder_to_Infiles(INPUT_PATH, "bed")
else:
infiles=[infile]
bed_infiles = [bed_infile]
##perform specific functions
if reference!=None and full_reference!=None:
specific_function(infiles,bed_infiles,reference,full_reference)
import ms_utils
import numpy
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 #leaves most text as actual text in PDFs, not outlines
import os
import scipy.stats as stats
import operator
def all_library_rpm_scatter(mse):
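    """Pairwise RPM scatter plots for every pair of libraries, each panel
    annotated with its Spearman rho; the grid is saved to
    plots/all_scatter_plots.pdf.
    """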
output_file = os.path.join(
mse.settings.get_rdir(),
'plots',
'all_scatter_plots.pdf')
num_libs = len(mse.libs)
num_plots_wide = num_libs-1
num_plots_high = num_libs-1
fig = plt.figure(figsize=(24,24))
for i in range(len(mse.libs)):
for j in range(i+1, len(mse.libs)):
plot_index = (j-1)*(num_plots_wide)+(i+1)
plot = fig.add_subplot(num_plots_high, num_plots_wide, plot_index)
if j == num_plots_high:
plot.set_xlabel("%s RPM" % (mse.libs[i].lib_settings.sample_name))
if i == 0:
plot.set_ylabel("%s RPM" % (mse.libs[j].lib_settings.sample_name))
plot.set_xscale('symlog', linthreshx=0.1)
plot.set_yscale('symlog', linthreshy=0.1)
x = mse.libs[i].name_sorted_rpms()
y = mse.libs[j].name_sorted_rpms()
plot.scatter(x, y, color=ms_utils.black, s=3)
plot.plot(numpy.arange(0,1000000,1), numpy.arange(0,1000000,1), color=ms_utils.vermillion, lw = 1, linestyle='dashed')
rho, pval = stats.spearmanr(x, y)
plot.annotate('rho=%.3f' % (rho), xy=(0, 0.8), xytext=(0, 0.8), textcoords='axes fraction')
plot.set_xlim(0, 1000000)
plot.set_ylim(0, 1000000)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05, wspace=0.2, hspace=0.2)
plt.savefig(output_file, transparent='True', format='pdf')
def monosome_over_mrnp_reproducibility(mse):
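    """Replicate-vs-replicate scatter of the monosome/mRNP RPM ratio,
    annotated with Spearman rho and Pearson r, saved to
    plots/mono_over_mRNP_plots.pdf.
    """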
output_file = os.path.join(
mse.settings.get_rdir(),
'plots',
'mono_over_mRNP_plots.pdf')
num_libs = len(mse.monosome_libs)
num_plots_wide = num_libs-1
num_plots_high = num_libs-1
fig = plt.figure(figsize=(8,8))
for i in range(len(mse.monosome_libs)):
for j in range(i+1, len(mse.monosome_libs)):
plot_index = (j-1)*(num_plots_wide)+(i+1)
plot = fig.add_subplot(num_plots_high, num_plots_wide, plot_index)
if j == num_plots_high:
plot.set_xlabel("%s / %s RPM" % (mse.monosome_libs[i].lib_settings.sample_name, mse.mrnp_libs[i].lib_settings.sample_name))
if i == 0:
plot.set_ylabel("%s / %s RPM" % (mse.monosome_libs[j].lib_settings.sample_name, mse.mrnp_libs[j].lib_settings.sample_name))
plot.set_xscale('symlog', linthreshx=0.01)
plot.set_yscale('symlog', linthreshy=0.01)
x = mse.monosome_libs[i].name_sorted_rpms()/mse.mrnp_libs[i].name_sorted_rpms()
y = mse.monosome_libs[j].name_sorted_rpms()/mse.mrnp_libs[j].name_sorted_rpms()
plot.scatter(x, y, color=ms_utils.black, s=3)
plot.plot(numpy.arange(0,1000,1), numpy.arange(0,1000,1), color=ms_utils.vermillion, lw = 1, linestyle='dashed')
rho, pval = stats.spearmanr(x, y)
fx, fy = ms_utils.filter_x_y_pairs(x, y)
r, p = stats.pearsonr(fx, fy)
plot.annotate('rho,r=%.3f,%.3f' % (rho, r), xy=(0, 0.9), xytext=(0, 0.9), textcoords='axes fraction')
plot.set_xlim(0, 1000)
plot.set_ylim(0, 1000)
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.2, hspace=0.2)
plt.savefig(output_file, transparent='True', format='pdf')
def monosome_over_total_reproducibility(mse):
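    """Replicate-vs-replicate scatter of the monosome/total RPM ratio,
    annotated with Spearman rho and Pearson r, saved to
    plots/mono_over_total_plots.pdf.
    """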
output_file = os.path.join(
mse.settings.get_rdir(),
'plots',
'mono_over_total_plots.pdf')
num_libs = len(mse.monosome_libs)
num_plots_wide = num_libs-1
num_plots_high = num_libs-1
fig = plt.figure(figsize=(8,8))
for i in range(len(mse.monosome_libs)):
for j in range(i+1, len(mse.monosome_libs)):
plot_index = (j-1)*(num_plots_wide)+(i+1)
plot = fig.add_subplot(num_plots_high, num_plots_wide, plot_index)
if j == num_plots_high:
plot.set_xlabel("%s / %s RPM" % (mse.monosome_libs[i].lib_settings.sample_name, mse.total_libs[i].lib_settings.sample_name))
if i == 0:
plot.set_ylabel("%s / %s RPM" % (mse.monosome_libs[j].lib_settings.sample_name, mse.total_libs[j].lib_settings.sample_name))
plot.set_xscale('symlog', linthreshx=0.01)
plot.set_yscale('symlog', linthreshy=0.01)
x = mse.monosome_libs[i].name_sorted_rpms()/mse.total_libs[i].name_sorted_rpms()
y = mse.monosome_libs[j].name_sorted_rpms()/mse.total_libs[j].name_sorted_rpms()
plot.scatter(x, y, color=ms_utils.black, s=3)
plot.plot(numpy.arange(0,1000,1), numpy.arange(0,1000,1), color=ms_utils.vermillion, lw = 1, linestyle='dashed')
rho, pval = stats.spearmanr(x, y)
fx, fy = ms_utils.filter_x_y_pairs(x, y)
r, p = stats.pearsonr(fx, fy)
plot.annotate('rho,r=%.3f,%.3f' % (rho, r), xy=(0, 0.9), xytext=(0, 0.9), textcoords='axes fraction')
plot.set_xlim(0, 1000)
plot.set_ylim(0, 1000)
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.2, hspace=0.2)
plt.savefig(output_file, transparent='True', format='pdf')
def monosome_over_mrnp_plus_monosome_reproducibility(mse):
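    """Replicate-vs-replicate scatter of monosome/(monosome+mRNP) RPM, i.e.
    the monosome recruitment score used elsewhere in this module; both axes
    are linear on [0, 1].
    """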
output_file = os.path.join(
mse.settings.get_rdir(),
'plots',
'mono_over_mRNP_plus_mono_plots.pdf')
num_libs = len(mse.monosome_libs)
num_plots_wide = num_libs-1
num_plots_high = num_libs-1
fig = plt.figure(figsize=(8,8))
for i in range(len(mse.monosome_libs)):
for j in range(i+1, len(mse.monosome_libs)):
plot_index = (j-1)*(num_plots_wide)+(i+1)
plot = fig.add_subplot(num_plots_high, num_plots_wide, plot_index)
if j == num_plots_high:
plot.set_xlabel("%s / (%s+%s) RPM" % (mse.monosome_libs[i].lib_settings.sample_name,
mse.monosome_libs[i].lib_settings.sample_name,
mse.mrnp_libs[i].lib_settings.sample_name))
if i == 0:
plot.set_ylabel("%s / (%s+%s) RPM" % (mse.monosome_libs[j].lib_settings.sample_name,
mse.monosome_libs[j].lib_settings.sample_name,
mse.mrnp_libs[j].lib_settings.sample_name))
#plot.set_xscale('symlog', linthreshx=0.01)
#plot.set_yscale('symlog', linthreshy=0.01)
x = mse.monosome_libs[i].name_sorted_rpms()/(mse.mrnp_libs[i].name_sorted_rpms()+mse.monosome_libs[i].name_sorted_rpms())
y = mse.monosome_libs[j].name_sorted_rpms()/(mse.mrnp_libs[j].name_sorted_rpms()+mse.monosome_libs[j].name_sorted_rpms())
plot.scatter(x, y, color=ms_utils.black, s=3)
plot.plot(numpy.arange(0,1000,1), numpy.arange(0,1000,1), color=ms_utils.vermillion, lw = 1, linestyle='dashed')
rho, pval = stats.spearmanr(x, y)
fx, fy = ms_utils.filter_x_y_pairs(x, y)
r, p = stats.pearsonr(fx, fy)
plot.annotate('rho,r=%.3f,%.3f' % (rho, r), xy=(0, 0.9), xytext=(0, 0.9), textcoords='axes fraction')
plot.set_xlim(0, 1)
plot.set_ylim(0, 1)
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.2, hspace=0.2)
plt.savefig(output_file, transparent='True', format='pdf')
def recruitment_change_rank_value_plot_interactive(mse, annotation_file, read_cutoff = 128, corrected_p_cutoff = 0.05):
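    """Interactive (bokeh) rank plot of the change in recruitment score,
    monosome RPM / (monosome RPM + mRNP RPM), between matched sequence pairs.
    Pairs with fewer than read_cutoff reads in either member are skipped;
    significance is a t-test corrected with ms_utils.bonferroniCorrection
    (despite the bh_ prefix on the variable holding the corrected values).
    """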
from bokeh.plotting import figure, output_file, show, save, ColumnDataSource, gridplot
from bokeh.models import Range1d
from bokeh.models import HoverTool
from collections import OrderedDict
set_name1, set_name2, matched_set = mse.parse_matched_set_annotation(annotation_file)
# output to static HTML file
output_file_name = os.path.join(
mse.settings.get_rdir(),
'plots',
'%s_%s_recruitment_change_rank_value.html' % (set_name1, set_name2))
output_file(output_file_name)
all_change_scores = {}
all_change_score_means = {}
all_p_values = {}
all_annotations = {}
for matched_pool_seqs in matched_set:
set1_scores = []
set2_scores = []
for i in range(len(mse.monosome_libs)):
set_1_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[0]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[0])
set_2_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[1]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[1])
# include only comparisons where the average number of reads is high enough
if set_1_counts >= read_cutoff and set_2_counts >= read_cutoff:
set1_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[0]))
set2_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[1]))
else:
set1_score = float('nan')
set2_score = float('nan')
set1_scores.append(set1_score)
set2_scores.append(set2_score)
scores_1_filtered, scores_2_filtered = ms_utils.filter_x_y_pairs(set1_scores, set2_scores)
recruitment_changes = numpy.array(scores_1_filtered) - numpy.array(scores_2_filtered)
if len(scores_1_filtered) > 0 and len(scores_2_filtered) > 0:
comparison = (matched_pool_seqs[0], matched_pool_seqs[1])
t, p = stats.ttest_ind(scores_1_filtered, scores_2_filtered)
all_change_scores[comparison] = recruitment_changes
all_p_values[comparison] = p
average = numpy.average(recruitment_changes)
all_change_score_means[comparison] = average
all_annotations[comparison] = '%s-%s=%.3f, p=%f' % (matched_pool_seqs[0], matched_pool_seqs[1], average, p)
bh_corrected_p_values = ms_utils.bonferroniCorrection(all_p_values)
sorted_means = sorted(all_change_score_means.iteritems(), key=operator.itemgetter(1))
sig_ranks = [] #will store rank values for passing p values
sig_means = []
sig_com1 = []
sig_com2 = []
sig_p = []
sig_n = []
insig_ranks = [] #will store rank values for failing p values
insig_means = []
insig_anno = []
insig_com1 = []
insig_com2 = []
insig_p = []
insig_n = []
for rank in range(len(sorted_means)):
comparison, mean = sorted_means[rank]
if bh_corrected_p_values[comparison]<corrected_p_cutoff:
sig_ranks.append(rank)
sig_means.append(mean)
sig_com1.append(comparison[0])
sig_com2.append(comparison[1])
sig_p.append(bh_corrected_p_values[comparison])
sig_n.append(len(all_change_scores[comparison]))
else:
insig_ranks.append(rank)
insig_means.append(mean)
insig_anno.append(all_annotations[comparison])
insig_com1.append(comparison[0])
insig_com2.append(comparison[1])
insig_p.append(bh_corrected_p_values[comparison])
insig_n.append(len(all_change_scores[comparison]))
all_ranks = range(len(sorted_means))
all_max = [max(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
all_min = [min(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
source = ColumnDataSource(data=dict(x=insig_ranks, y=insig_means, com1=insig_com1, com2=insig_com2, p=insig_p, n=insig_n, value=insig_means))
sig_source = ColumnDataSource(data=dict(x=sig_ranks, y=sig_means, com1=sig_com1, com2=sig_com2, p=sig_p, n=sig_n, value=sig_means))
max_source = ColumnDataSource(data=dict(x=all_ranks, y=all_max))
min_source = ColumnDataSource(data=dict(x=all_ranks, y=all_min))
hover = HoverTool(names=['insig', 'sig'])
TOOLS = "pan,wheel_zoom,reset,save"
PlotFig = figure(x_axis_label="rank", y_axis_label="%s-%s recruitment change" % (set_name1, set_name2),
tools=[TOOLS,hover], toolbar_location="right")
PlotFig.circle("x", "y", size=5, source=source, color=ms_utils.bokeh_black, name = 'insig')
PlotFig.circle("x", "y", size=5, source=sig_source, color=ms_utils.bokeh_vermillion, name = 'sig')
# adjust what information you get when you hover over it
hover.tooltips = OrderedDict([("%s" % set_name1, "@com1"),
("%s" % set_name2, "@com2"),
("mean", "@value"),
("Bonf. p", "@p"),
("n", "@n")])
PlotFig.line("x", "y", line_width=1, source=min_source, color=ms_utils.bokeh_skyBlue)
PlotFig.line("x", "y", line_width=1, source=max_source, color=ms_utils.bokeh_skyBlue)
PlotFig.x_range = Range1d(start=-1, end=len(sorted_means))
PlotFig.y_range = Range1d(start=-1, end=1)
save(PlotFig)
def recruitment_fold_change_rank_value_plot_interactive(mse, annotation_file, read_cutoff = 128, corrected_p_cutoff = 0.05):
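    """Same layout as recruitment_change_rank_value_plot_interactive, but the
    ranked quantity is the ratio (fold change) of recruitment scores rather
    than their difference, plotted on a log y-axis.
    """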
from bokeh.plotting import figure, output_file, show, save, ColumnDataSource, gridplot
from bokeh.models import Range1d
from bokeh.models import HoverTool
from collections import OrderedDict
set_name1, set_name2, matched_set = mse.parse_matched_set_annotation(annotation_file)
# output to static HTML file
output_file_name = os.path.join(
mse.settings.get_rdir(),
'plots',
'%s_%s_recruitment_fold_change_rank_value.html' % (set_name1, set_name2))
output_file(output_file_name)
all_change_scores = {}
all_change_score_means = {}
all_p_values = {}
all_annotations = {}
for matched_pool_seqs in matched_set:
set1_scores = []
set2_scores = []
for i in range(len(mse.monosome_libs)):
set_1_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[0]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[0])
set_2_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[1]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[1])
# include only comparisons where the average number of reads is high enough
if set_1_counts >= read_cutoff and set_2_counts >= read_cutoff:
set1_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[0]))
set2_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[1]))
else:
set1_score = float('nan')
set2_score = float('nan')
set1_scores.append(set1_score)
set2_scores.append(set2_score)
scores_1_filtered, scores_2_filtered = ms_utils.filter_x_y_pairs(set1_scores, set2_scores)
recruitment_changes = numpy.array(scores_1_filtered) / numpy.array(scores_2_filtered)
if len(scores_1_filtered) > 0 and len(scores_2_filtered) > 0:
comparison = (matched_pool_seqs[0], matched_pool_seqs[1])
t, p = stats.ttest_ind(scores_1_filtered, scores_2_filtered)
all_change_scores[comparison] = recruitment_changes
all_p_values[comparison] = p
average = numpy.average(recruitment_changes)
all_change_score_means[comparison] = average
all_annotations[comparison] = '%s-%s=%.3f, p=%f' % (matched_pool_seqs[0], matched_pool_seqs[1], average, p)
bh_corrected_p_values = ms_utils.bonferroniCorrection(all_p_values)
sorted_means = sorted(all_change_score_means.iteritems(), key=operator.itemgetter(1))
sig_ranks = [] #will store rank values for passing p values
sig_means = []
sig_com1 = []
sig_com2 = []
sig_p = []
sig_n = []
insig_ranks = [] #will store rank values for failing p values
insig_means = []
insig_anno = []
insig_com1 = []
insig_com2 = []
insig_p = []
insig_n = []
for rank in range(len(sorted_means)):
comparison, mean = sorted_means[rank]
if bh_corrected_p_values[comparison]<corrected_p_cutoff:
sig_ranks.append(rank)
sig_means.append(mean)
sig_com1.append(comparison[0])
sig_com2.append(comparison[1])
sig_p.append(bh_corrected_p_values[comparison])
sig_n.append(len(all_change_scores[comparison]))
else:
insig_ranks.append(rank)
insig_means.append(mean)
insig_anno.append(all_annotations[comparison])
insig_com1.append(comparison[0])
insig_com2.append(comparison[1])
insig_p.append(bh_corrected_p_values[comparison])
insig_n.append(len(all_change_scores[comparison]))
all_ranks = range(len(sorted_means))
all_max = [max(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
all_min = [min(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
source = ColumnDataSource(data=dict(x=insig_ranks, y=insig_means, com1=insig_com1, com2=insig_com2, p=insig_p, n=insig_n, value=insig_means))
sig_source = ColumnDataSource(data=dict(x=sig_ranks, y=sig_means, com1=sig_com1, com2=sig_com2, p=sig_p, n=sig_n, value=sig_means))
max_source = ColumnDataSource(data=dict(x=all_ranks, y=all_max))
min_source = ColumnDataSource(data=dict(x=all_ranks, y=all_min))
hover = HoverTool(names=['insig', 'sig'])
TOOLS = "pan,wheel_zoom,reset,save"
PlotFig = figure(x_axis_label="rank", y_axis_label="%s/%s fold recruitment change" % (set_name1, set_name2),
tools=[TOOLS,hover], toolbar_location="right", y_axis_type="log")
PlotFig.circle("x", "y", size=5, source=source, color=ms_utils.bokeh_black, name = 'insig')
PlotFig.circle("x", "y", size=5, source=sig_source, color=ms_utils.bokeh_vermillion, name = 'sig')
# adjust what information you get when you hover over it
hover.tooltips = OrderedDict([("%s" % set_name1, "@com1"),
("%s" % set_name2, "@com2"),
("mean", "@value"),
("Bonf. p", "@p"),
("n", "@n")])
PlotFig.line("x", "y", line_width=1, source=min_source, color=ms_utils.bokeh_skyBlue)
PlotFig.line("x", "y", line_width=1, source=max_source, color=ms_utils.bokeh_skyBlue)
PlotFig.x_range = Range1d(start=-1, end=len(sorted_means))
PlotFig.y_range = Range1d(start=.01, end=100)
save(PlotFig)
def plot_recruitment_violins(mse, annotation_file, read_cutoff = 128):
#Makes violin plots of recruitment scores
set_name1, set_name2, matched_set = mse.parse_matched_set_annotation(annotation_file)
    # output PDF file (and a companion p-value text file below)
output_file_name = os.path.join(
mse.settings.get_rdir(),
'plots',
'%s_%s_violin.pdf' % (set_name1, set_name2))
legends = []
data = []
set1_seqs = [pair[0] for pair in matched_set]
set2_seqs = [pair[1] for pair in matched_set]
all_seqs = mse.monosome_libs[0].sorted_names()
set_members = {set_name1:set1_seqs, set_name2:set2_seqs, 'all':all_seqs}
for lib_index in range(len(mse.monosome_libs)):
scores_dict = {}
for set_type in [set_name1, set_name2]:
legends.append('%s %s' % (mse.monosome_libs[lib_index].lib_settings.sample_name, set_type))
scores = []
for seq_name in set_members[set_type]:
counts = mse.monosome_libs[lib_index].get_counts(seq_name) \
+ mse.mrnp_libs[lib_index].get_counts(seq_name)
if counts >= read_cutoff:
recruitment_score = mse.monosome_libs[lib_index].get_rpm(seq_name) / \
(mse.monosome_libs[lib_index].get_rpm(seq_name) +
mse.mrnp_libs[lib_index].get_rpm(seq_name))
scores.append(recruitment_score)
data.append(scores)
scores_dict[set_type] = scores
u, up = stats.mannwhitneyu(scores_dict[set_name1], scores_dict[set_name2],)
print mse.monosome_libs[lib_index].lib_settings.sample_name, set_name1, set_name2, up
p_file_name = os.path.join(
mse.settings.get_rdir(),
'plots',
'%s_%s_violin.ks_p.txt' % (set_name1, set_name2))
g=open(p_file_name, 'w')
    g.write('dataset1\tdataset2\tKS d\tKS p\tt-test t\tt-test p\n')
for i in range(len(legends)):
for j in range(len(legends)):
d, p = stats.ks_2samp(data[i], data[j])
tind, pind = stats.ttest_ind(data[i], data[j])
g.write('%s\t%s\t%.3f\t%.3e\t%.3f\t%.3e\t\n' % (legends[i], legends[j], d, p, tind, pind))
g.close()
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Hide the grid behind plot objects
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_ylabel('monosome recruitment score')
#ax1.set_xlabel(ylabel)
plt.subplots_adjust(left=0.1, right=0.95, top=0.9, bottom=0.25)
pos = range(1,len(data)+1) # starts at 1 to play nice with boxplot
dist = max(pos)-min(pos)
w = min(0.15*max(dist,1.0),0.5)
for d,p in zip(data,pos):
d = [float(dm) for dm in d]
k = stats.gaussian_kde(d) #calculates the kernel density
m = k.dataset.min() #lower bound of violin
M = k.dataset.max() #upper bound of violin
x = numpy.arange(m,M,(M-m)/100.) # support for violin
v = k.evaluate(x) #violin profile (density curve)
#print 'v=',v
v = v/v.max()*w #scaling the violin to the available space
if 'all' in legends[p-1]:
color = (0, 0, 0)
elif set_name1 in legends[p-1]:
            color = (0/255., 159/255., 115/255.)
elif set_name2 in legends[p-1]:
color = (213/255., 94/255., 0)
else:
print legends[p-1]
plt.fill_betweenx(x,p,v+p,facecolor=color,alpha=0.3)
plt.fill_betweenx(x,p,-v+p,facecolor=color,alpha=0.3)
if True:
bplot = plt.boxplot(data,notch=1)
plt.setp(bplot['boxes'], color='black')
plt.setp(bplot['whiskers'], color='black')
plt.setp(bplot['fliers'], color='red', marker='.')
per50s = []
i = 1
for datum in data:
#per50s.append(stats.scoreatpercentile(datum, 50))
t = stats.scoreatpercentile(datum, 50)
per50s.append(t)
ax1.annotate(str(round(t,3)), xy=(i+0.1, t), xycoords='data', arrowprops=None, fontsize='small', color='black')
i+= 1
#ax1.set_xticks([0.0, 0.5, 1.0, 1.5])
ax1.set_ylim(0, 1)
xtickNames = plt.setp(ax1, xticklabels=legends)
plt.setp(xtickNames, rotation=90, fontsize=6)
plt.savefig(output_file_name, transparent='True', format='pdf')
def recruitment_change_rank_value_plot_static(mse, annotation_file, read_cutoff = 128, corrected_p_cutoff = 0.05):
    #Makes a static (PDF) rank-ordered scatter plot of recruitment score changes
set_name1, set_name2, matched_set = mse.parse_matched_set_annotation(annotation_file)
    # output to a static PDF file
output_file_name = os.path.join(
mse.settings.get_rdir(),
'plots',
'%s_%s_rank_change.pdf' % (set_name1, set_name2))
all_change_scores = {}
all_change_score_means = {}
all_p_values = {}
all_annotations = {}
for matched_pool_seqs in matched_set:
set1_scores = []
set2_scores = []
for i in range(len(mse.monosome_libs)):
set_1_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[0]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[0])
set_2_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[1]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[1])
# include only comparisons where the average number of reads is high enough
if set_1_counts >= read_cutoff and set_2_counts >= read_cutoff:
set1_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[0]))
set2_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[1]))
else:
set1_score = float('nan')
set2_score = float('nan')
set1_scores.append(set1_score)
set2_scores.append(set2_score)
scores_1_filtered, scores_2_filtered = ms_utils.filter_x_y_pairs(set1_scores, set2_scores)
recruitment_changes = numpy.array(scores_1_filtered) - numpy.array(scores_2_filtered)
if len(scores_1_filtered) > 0 and len(scores_2_filtered) > 0:
comparison = (matched_pool_seqs[0], matched_pool_seqs[1])
t, p = stats.ttest_ind(scores_1_filtered, scores_2_filtered)
all_change_scores[comparison] = recruitment_changes
all_p_values[comparison] = p
average = numpy.average(recruitment_changes)
all_change_score_means[comparison] = average
all_annotations[comparison] = '%s-%s=%.3f, p=%f' % (matched_pool_seqs[0], matched_pool_seqs[1], average, p)
bh_corrected_p_values = ms_utils.bonferroniCorrection(all_p_values)
sorted_means = sorted(all_change_score_means.iteritems(), key=operator.itemgetter(1))
sig_ranks = [] #will store rank values for passing p values
sig_means = []
sig_com1 = []
sig_com2 = []
sig_p = []
sig_n = []
insig_ranks = [] #will store rank values for failing p values
insig_means = []
insig_anno = []
insig_com1 = []
insig_com2 = []
insig_p = []
insig_n = []
for rank in range(len(sorted_means)):
comparison, mean = sorted_means[rank]
if bh_corrected_p_values[comparison]<corrected_p_cutoff:
sig_ranks.append(rank)
sig_means.append(mean)
sig_com1.append(comparison[0])
sig_com2.append(comparison[1])
sig_p.append(bh_corrected_p_values[comparison])
sig_n.append(len(all_change_scores[comparison]))
else:
insig_ranks.append(rank)
insig_means.append(mean)
insig_anno.append(all_annotations[comparison])
insig_com1.append(comparison[0])
insig_com2.append(comparison[1])
insig_p.append(bh_corrected_p_values[comparison])
insig_n.append(len(all_change_scores[comparison]))
all_ranks = range(len(sorted_means))
all_max = [max(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
all_min = [min(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
fig = plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(111)
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey')
ax1.set_ylabel("%s-%s recruitment change" % (set_name1, set_name2))
ax1.set_xlabel('rank')
ax1.scatter(insig_ranks, insig_means, color = ms_utils.black)
ax1.scatter(sig_ranks, sig_means, color=ms_utils.vermillion)
ax1.plot(all_ranks, all_max, color=ms_utils.skyBlue)
ax1.plot(all_ranks, all_min, color=ms_utils.skyBlue)
ax1.set_ylim(-1, 1)
ax1.set_xlim(-1, max(all_ranks)+1)
plt.savefig(output_file_name, transparent='True', format='pdf')
def reverse_recruitment_change_rank_value_plot_static(mse, annotation_file, read_cutoff = 128, corrected_p_cutoff = 0.05):
    #Makes a static (PDF) rank-ordered scatter plot of recruitment score changes,
    #with the comparison direction reversed (set_name2 minus set_name1)
set_name1, set_name2, matched_set = mse.parse_matched_set_annotation(annotation_file)
    # output to a static PDF file
output_file_name = os.path.join(
mse.settings.get_rdir(),
'plots',
'%s_%s_rank_change.pdf' % (set_name2, set_name1))
all_change_scores = {}
all_change_score_means = {}
all_p_values = {}
all_annotations = {}
for matched_pool_seqs in matched_set:
set1_scores = []
set2_scores = []
for i in range(len(mse.monosome_libs)):
set_1_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[0]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[0])
set_2_counts = mse.monosome_libs[i].get_counts(matched_pool_seqs[1]) \
+ mse.mrnp_libs[i].get_counts(matched_pool_seqs[1])
# include only comparisons where the average number of reads is high enough
if set_1_counts >= read_cutoff and set_2_counts >= read_cutoff:
set1_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[0]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[0]))
set2_score = mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) / \
(mse.monosome_libs[i].get_rpm(matched_pool_seqs[1]) +
mse.mrnp_libs[i].get_rpm(matched_pool_seqs[1]))
else:
set1_score = float('nan')
set2_score = float('nan')
set1_scores.append(set1_score)
set2_scores.append(set2_score)
scores_1_filtered, scores_2_filtered = ms_utils.filter_x_y_pairs(set1_scores, set2_scores)
recruitment_changes = numpy.array(scores_2_filtered) - numpy.array(scores_1_filtered)
if len(scores_1_filtered) > 0 and len(scores_2_filtered) > 0:
comparison = (matched_pool_seqs[1], matched_pool_seqs[0])
t, p = stats.ttest_ind(scores_1_filtered, scores_2_filtered)
all_change_scores[comparison] = recruitment_changes
all_p_values[comparison] = p
average = numpy.average(recruitment_changes)
all_change_score_means[comparison] = average
all_annotations[comparison] = '%s-%s=%.3f, p=%f' % (matched_pool_seqs[1], matched_pool_seqs[0], average, p)
bh_corrected_p_values = ms_utils.bonferroniCorrection(all_p_values)
sorted_means = sorted(all_change_score_means.iteritems(), key=operator.itemgetter(1))
sig_ranks = [] #will store rank values for passing p values
sig_means = []
sig_com1 = []
sig_com2 = []
sig_p = []
sig_n = []
insig_ranks = [] #will store rank values for failing p values
insig_means = []
insig_anno = []
insig_com1 = []
insig_com2 = []
insig_p = []
insig_n = []
for rank in range(len(sorted_means)):
comparison, mean = sorted_means[rank]
if bh_corrected_p_values[comparison]<corrected_p_cutoff:
sig_ranks.append(rank)
sig_means.append(mean)
sig_com1.append(comparison[0])
sig_com2.append(comparison[1])
sig_p.append(bh_corrected_p_values[comparison])
sig_n.append(len(all_change_scores[comparison]))
else:
insig_ranks.append(rank)
insig_means.append(mean)
insig_anno.append(all_annotations[comparison])
insig_com1.append(comparison[0])
insig_com2.append(comparison[1])
insig_p.append(bh_corrected_p_values[comparison])
insig_n.append(len(all_change_scores[comparison]))
all_ranks = range(len(sorted_means))
all_max = [max(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
all_min = [min(all_change_scores[sorted_means[rank][0]]) for rank in all_ranks]
fig = plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(111)
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey')
ax1.set_ylabel("%s-%s recruitment change" % (set_name2, set_name1))
ax1.set_xlabel('rank')
ax1.scatter(insig_ranks, insig_means, color = ms_utils.black)
ax1.scatter(sig_ranks, sig_means, color=ms_utils.vermillion)
ax1.plot(all_ranks, all_max, color=ms_utils.skyBlue)
ax1.plot(all_ranks, all_min, color=ms_utils.skyBlue)
ax1.set_ylim(-1, 1)
ax1.set_xlim(-1, max(all_ranks)+1)
plt.savefig(output_file_name, transparent='True', format='pdf')
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
from cinderclient import exceptions as cinder_exp
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.objects import resource_data as resource_data_object
from heat.tests.nova import fakes as fakes_nova
from heat.tests import test_volume_utils as vt_base
from heat.tests import utils
cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volumes and attachments.
resources:
volume:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
description: test_description
metadata:
key: value
volume2:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 2
volume3:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
scheduler_hints: {"hint1": "good_advice"}
attachment:
type: OS::Cinder::VolumeAttachment
properties:
instance_uuid: WikiDatabase
volume_id: { get_resource: volume }
mountpoint: /dev/vdc
'''
single_cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volume
resources:
volume:
type: OS::Cinder::Volume
properties:
size: 1
name: test_name
description: test_description
'''
class CinderVolumeTest(vt_base.BaseVolumeTest):
def setUp(self):
super(CinderVolumeTest, self).setUp()
self.t = template_format.parse(cinder_volume_template)
self.use_cinder = True
def _mock_create_volume(self, fv, stack_name, size=1,
final_status='available'):
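        """Record the mox expectations for a successful volume create: one
        volumes.create call followed by two volumes.get calls, the second of
        which returns the volume in final_status.
        """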
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=size, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume(final_status, id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
return fv_ready
def test_cinder_volume_size_constraint(self):
self.t['resources']['volume']['properties']['size'] = 0
stack = utils.parse_stack(self.t)
error = self.assertRaises(exception.StackValidationFailed,
self.create_volume,
self.t, stack, 'volume')
self.assertEqual(
"Property error : resources.volume.properties.size: "
"0 is out of range (min: 1, max: None)", six.text_type(error))
def test_cinder_create(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_stack'
self.stub_SnapshotConstraint_validate()
self.stub_VolumeConstraint_validate()
self.stub_VolumeTypeConstraint_validate()
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'},
volume_type='lvm').AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'].update({
'volume_type': 'lvm',
})
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_from_image(self):
fv = vt_base.FakeVolume('downloading')
stack_name = 'test_cvolume_create_from_img_stack'
image_id = '46988116-6703-4623-9dbc-2bc6d284021b'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
glance.GlanceClientPlugin.get_image_id(
image_id).MultipleTimes().AndReturn(image_id)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume',
imageRef=image_id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'image': image_id,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_with_read_only(self):
fv = vt_base.FakeVolume('with_read_only_access_mode')
stack_name = 'test_create_with_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume').AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
update_readonly_mock(fv.id, False).return_value(None)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'read_only': False,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_default(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_default_stack'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description=None,
name=vol_name).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'availability_zone': 'nova',
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_fn_getatt(self):
stack_name = 'test_cvolume_fngetatt_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv = vt_base.FakeVolume(
'available', availability_zone='zone1',
size=1, snapshot_id='snap-123', name='name',
description='desc', volume_type='lvm',
metadata={'key': 'value'}, source_volid=None,
bootable=False, created_at='2013-02-25T02:40:21.000000',
encrypted=False, attachments=[])
self.cinder_fc.volumes.get('vol-123').MultipleTimes().AndReturn(fv)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual(u'zone1', rsrc.FnGetAtt('availability_zone'))
self.assertEqual(u'1', rsrc.FnGetAtt('size'))
self.assertEqual(u'snap-123', rsrc.FnGetAtt('snapshot_id'))
self.assertEqual(u'name', rsrc.FnGetAtt('display_name'))
self.assertEqual(u'desc', rsrc.FnGetAtt('display_description'))
self.assertEqual(u'lvm', rsrc.FnGetAtt('volume_type'))
self.assertEqual(json.dumps({'key': 'value'}),
rsrc.FnGetAtt('metadata'))
self.assertEqual({'key': 'value'},
rsrc.FnGetAtt('metadata_values'))
self.assertEqual(u'None', rsrc.FnGetAtt('source_volid'))
self.assertEqual(u'available', rsrc.FnGetAtt('status'))
self.assertEqual(u'2013-02-25T02:40:21.000000',
rsrc.FnGetAtt('created_at'))
self.assertEqual(u'False', rsrc.FnGetAtt('bootable'))
self.assertEqual(u'False', rsrc.FnGetAtt('encrypted'))
self.assertEqual(u'[]', rsrc.FnGetAtt('attachments'))
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'unknown')
self.assertEqual(
'The Referenced Attribute (volume unknown) is incorrect.',
six.text_type(error))
self.m.VerifyAll()
def test_cinder_attachment(self):
stack_name = 'test_cvolume_attach_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_cinder_volume_shrink_fails(self):
stack_name = 'test_cvolume_shrink_fail_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name, size=2)
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties']['size'] = 2
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Shrinking volume is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_detached(self):
stack_name = 'test_cvolume_extend_det_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('available'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_start(self):
stack_name = 'test_cvolume_extend_fail_start_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2).AndRaise(
cinder_exp.OverLimit(413))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn('Over limit', six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_complete(self):
stack_name = 'test_cvolume_extend_fail_compl_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('error_extending'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn("Volume resize failed - Unknown status error_extending",
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_attached(self):
stack_name = 'test_cvolume_extend_att_stack'
# create script
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# update script
attachments = [{'id': 'vol-123',
'device': '/dev/vdc',
'server_id': u'WikiDatabase'}]
fv2 = vt_base.FakeVolume('in-use',
attachments=attachments, size=1)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
# detach script
fvd = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.cinder_fc.volumes.get(fvd.id).AndReturn(fvd)
self.fc.volumes.delete_server_volume('WikiDatabase', 'vol-123')
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# resize script
self.cinder_fc.volumes.extend(fvd.id, 2)
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_created_from_backup_with_same_size(self):
stack_name = 'test_cvolume_extend_snapsht_stack'
# create script
fvbr = vt_base.FakeBackupRestore('vol-123')
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('restoring-backup'))
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.update('vol-123', description=None,
name=vol_name).AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('available'))
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'availability_zone': 'nova',
'backup_id': 'backup-123'
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('available', fv.status)
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_retype(self):
fv = vt_base.FakeVolume('available',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_retype'
new_vol_type = 'new_type'
self.patchobject(cinder.CinderClientPlugin, '_create',
return_value=self.cinder_fc)
self.patchobject(self.cinder_fc.volumes, 'create', return_value=fv)
self.patchobject(self.cinder_fc.volumes, 'get', return_value=fv)
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume2')
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
self.patchobject(cinder.CinderClientPlugin, 'get_volume_type',
return_value=new_vol_type)
self.patchobject(self.cinder_fc.volumes, 'retype')
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
self.cinder_fc.volume_api_version = 1
new_vol_type_1 = 'new_type_1'
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type_1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
        # retype is not supported when the volume API version is 1
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Using Cinder API V1, '
'volume_type update is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
def test_cinder_volume_update_name_and_metadata(self):
# update the name, description and metadata
fv = vt_base.FakeVolume('creating',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_updname_stack'
update_name = 'update_name'
meta = {'Key': 'New Value'}
update_description = 'update_description'
kwargs = {
'name': update_name,
'description': update_description
}
fv = self._mock_create_volume(fv, stack_name)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.update(fv, **kwargs).AndReturn(None)
self.cinder_fc.volumes.update_all_metadata(fv, meta).AndReturn(None)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['name'] = update_name
props['description'] = update_description
props['metadata'] = meta
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_cinder_volume_update_read_only(self):
# update read only access mode
fv = vt_base.FakeVolume('update_read_only_access_mode')
stack_name = 'test_update_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={u'key': u'value'}).AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
        update_readonly_mock.return_value = None
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['read_only'] = True
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_cinder_snapshot(self):
stack_name = 'test_cvolume_snpsht_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.snapshot)()
self.assertEqual((rsrc.SNAPSHOT, rsrc.COMPLETE), rsrc.state)
self.assertEqual({'backup_id': 'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_snapshot_error(self):
stack_name = 'test_cvolume_snpsht_err_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
fail_reason = 'Could not determine which Swift endpoint to use'
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('error', fail_reason=fail_reason))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.snapshot))
self.assertEqual((rsrc.SNAPSHOT, rsrc.FAILED), rsrc.state)
self.assertIn(fail_reason, rsrc.status_reason)
self.assertEqual({u'backup_id': u'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_volume_attachment_update_device(self):
stack_name = 'test_cvolume_attach_udev_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
device=u'/dev/vdd',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['mountpoint'] = '/dev/vdd'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_volume(self):
stack_name = 'test_cvolume_attach_uvol_stack'
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv2 = vt_base.FakeVolume('creating', id='vol-456')
vol2_name = utils.PhysName(stack_name, 'volume2')
self.cinder_fc.volumes.create(
size=2, availability_zone='nova',
description=None,
name=vol2_name).AndReturn(fv2)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
fv2 = vt_base.FakeVolume('available', id=fv2.id)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
fv2a = vt_base.FakeVolume('attaching', id='vol-456')
self._mock_create_server_volume_script(fv2a, volume='vol-456',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.create_volume(self.t, stack, 'volume2')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['volume_id'] = 'vol-456'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(fv2a.id, rsrc.resource_id)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_server(self):
stack_name = 'test_cvolume_attach_usrv_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
server=u'AnotherServer',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['instance_uuid'] = 'AnotherServer'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints(self):
fv = vt_base.FakeVolume('creating')
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, name='test_name', description=None,
availability_zone='nova',
scheduler_hints={'hint1': 'good_advice'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume3')
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints_and_cinder_api_v1(self):
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volume_api_version = 1
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_api_v1_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.StackValidationFailed,
self.create_volume, self.t, stack, 'volume3')
self.assertIn('Scheduler hints are not supported by the current '
'volume API.', six.text_type(ex))
self.m.VerifyAll()
def _test_cinder_create_invalid_property_combinations(
self, stack_name, combinations, err_msg, exc):
stack = utils.parse_stack(self.t, stack_name=stack_name)
vp = stack.t['Resources']['volume2']['Properties']
vp.pop('size')
vp.update(combinations)
rsrc = stack['volume2']
ex = self.assertRaises(exc, rsrc.validate)
self.assertEqual(err_msg, six.text_type(ex))
def test_cinder_create_with_image_and_imageRef(self):
stack_name = 'test_create_with_image_and_imageRef'
combinations = {'imageRef': 'image-456', 'image': 'image-123'}
err_msg = ("Cannot define the following properties at the same "
"time: image, imageRef.")
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.ResourcePropertyConflict)
def test_cinder_create_with_size_snapshot_and_image(self):
stack_name = 'test_create_with_size_snapshot_and_image'
combinations = {
'size': 1,
'image': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'image\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_imageRef(self):
stack_name = 'test_create_with_size_snapshot_and_imageRef'
combinations = {
'size': 1,
'imageRef': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'imageRef\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_sourcevol(self):
stack_name = 'test_create_with_size_snapshot_and_sourcevol'
combinations = {
'size': 1,
'source_volid': 'volume-123',
'snapshot_id': 'snapshot-123'}
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'source_volid\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_snapshot_and_source_volume(self):
stack_name = 'test_create_with_snapshot_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'snapshot_id': 'snapshot-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'snapshot_id\', \'source_volid\'].')
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_image_and_source_volume(self):
stack_name = 'test_create_with_image_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'image': 'image-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'source_volid\', \'image\'].')
self.stub_VolumeConstraint_validate()
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_no_size_no_combinations(self):
stack_name = 'test_create_no_size_no_options'
combinations = {}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
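    # Taken together, the combination tests above pin down the validation rules
    # expressed by the error messages: when "size" is given, at most one of
    # image, imageRef, source_volid or snapshot_id may be set; when neither
    # "size" nor "backup_id" is given, exactly one of them is required.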
def test_volume_restore(self):
stack_name = 'test_cvolume_restore_stack'
# create script
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
# snapshot script
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
# restore script
fvbr = vt_base.FakeBackupRestore('vol-123')
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.cinder_fc.volumes.update('vol-123',
description='test_description',
name='test_name')
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack.create)()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
scheduler.TaskRunner(stack.snapshot)()
self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state)
data = stack.prepare_abandon()
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, stack.id)
stack.restore(fake_snapshot)
self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state)
self.m.VerifyAll()
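# Illustrative sketch only, not part of the Heat test suite: the mox
# record/replay/verify cycle that the tests above follow. ``m`` is assumed to be
# a mox.Mox() instance and ``client`` a client object whose ``volumes.get`` we
# want to stub; the volume id and return value are made up for the example.
def _mox_cycle_sketch(m, client):
    # Record phase: stub the attribute and declare the expected call together
    # with its canned return value.
    m.StubOutWithMock(client.volumes, 'get')
    client.volumes.get('vol-123').AndReturn('fake-volume')
    # Replay phase: from here on only the recorded calls are accepted.
    m.ReplayAll()
    result = client.volumes.get('vol-123')
    # Verify phase: fails if any recorded expectation was never exercised.
    m.VerifyAll()
    return result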
|
|
import tempfile
import shutil
import os
import sys
import posixpath
from StringIO import StringIO
from django.test import TestCase
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.db.models.loading import load_app
from django.template import Template, Context
from django.contrib.staticfiles import finders, storage
TEST_ROOT = os.path.dirname(__file__)
class StaticFilesTestCase(TestCase):
"""
Test case with a couple utility assertions.
"""
def setUp(self):
self.old_staticfiles_url = settings.STATICFILES_URL
self.old_staticfiles_root = settings.STATICFILES_ROOT
self.old_staticfiles_dirs = settings.STATICFILES_DIRS
self.old_staticfiles_finders = settings.STATICFILES_FINDERS
self.old_installed_apps = settings.INSTALLED_APPS
self.old_media_root = settings.MEDIA_ROOT
self.old_media_url = settings.MEDIA_URL
self.old_admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
self.old_debug = settings.DEBUG
# We have to load these apps to test staticfiles.
load_app('regressiontests.staticfiles_tests.apps.test')
load_app('regressiontests.staticfiles_tests.apps.no_label')
site_media = os.path.join(TEST_ROOT, 'project', 'site_media')
settings.DEBUG = True
settings.MEDIA_ROOT = os.path.join(site_media, 'media')
settings.MEDIA_URL = '/media/'
settings.STATICFILES_ROOT = os.path.join(site_media, 'static')
settings.STATICFILES_URL = '/static/'
settings.ADMIN_MEDIA_PREFIX = '/static/admin/'
settings.STATICFILES_DIRS = (
os.path.join(TEST_ROOT, 'project', 'documents'),
)
settings.STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
def tearDown(self):
settings.DEBUG = self.old_debug
settings.MEDIA_ROOT = self.old_media_root
settings.MEDIA_URL = self.old_media_url
settings.ADMIN_MEDIA_PREFIX = self.old_admin_media_prefix
settings.STATICFILES_ROOT = self.old_staticfiles_root
settings.STATICFILES_URL = self.old_staticfiles_url
settings.STATICFILES_DIRS = self.old_staticfiles_dirs
settings.STATICFILES_FINDERS = self.old_staticfiles_finders
settings.INSTALLED_APPS = self.old_installed_apps
def assertFileContains(self, filepath, text):
self.failUnless(text in self._get_file(filepath),
"'%s' not in '%s'" % (text, filepath))
def assertFileNotFound(self, filepath):
self.assertRaises(IOError, self._get_file, filepath)
class BuildStaticTestCase(StaticFilesTestCase):
"""
Tests shared by all file-resolving features (collectstatic,
findstatic, and static serve view).
    This relies on the asserts defined in StaticFilesTestCase, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super(BuildStaticTestCase, self).setUp()
self.old_staticfiles_storage = settings.STATICFILES_STORAGE
self.old_root = settings.STATICFILES_ROOT
settings.STATICFILES_ROOT = tempfile.mkdtemp()
self.run_collectstatic()
def tearDown(self):
shutil.rmtree(settings.STATICFILES_ROOT)
settings.STATICFILES_ROOT = self.old_root
super(BuildStaticTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity='0',
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATICFILES_ROOT, filepath)
return open(filepath).read()
class TestDefaults(object):
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app media/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
class TestFindStatic(BuildStaticTestCase, TestDefaults):
"""
Test ``findstatic`` management command.
"""
def _get_file(self, filepath):
_stdout = sys.stdout
sys.stdout = StringIO()
try:
call_command('findstatic', filepath, all=False, verbosity='0')
sys.stdout.seek(0)
lines = [l.strip() for l in sys.stdout.readlines()]
contents = open(lines[1].strip()).read()
finally:
sys.stdout = _stdout
return contents
def test_all_files(self):
"""
Test that findstatic returns all candidate files if run without --first.
"""
_stdout = sys.stdout
sys.stdout = StringIO()
try:
call_command('findstatic', 'test/file.txt', verbosity='0')
sys.stdout.seek(0)
lines = [l.strip() for l in sys.stdout.readlines()]
finally:
sys.stdout = _stdout
self.assertEquals(len(lines), 3) # three because there is also the "Found <file> here" line
self.failUnless('project' in lines[1])
self.failUnless('apps' in lines[2])
class TestBuildStatic(BuildStaticTestCase, TestDefaults):
"""
Test ``collectstatic`` management command.
"""
def test_ignore(self):
"""
Test that -i patterns are ignored.
"""
self.assertFileNotFound('test/test.ignoreme')
def test_common_ignore_patterns(self):
"""
Common ignore patterns (*~, .*, CVS) are ignored.
"""
self.assertFileNotFound('test/.hidden')
self.assertFileNotFound('test/backup~')
self.assertFileNotFound('test/CVS')
class TestBuildStaticExcludeNoDefaultIgnore(BuildStaticTestCase, TestDefaults):
"""
Test ``--exclude-dirs`` and ``--no-default-ignore`` options for
``collectstatic`` management command.
"""
def run_collectstatic(self):
super(TestBuildStaticExcludeNoDefaultIgnore, self).run_collectstatic(
use_default_ignore_patterns=False)
def test_no_common_ignore_patterns(self):
"""
With --no-default-ignore, common ignore patterns (*~, .*, CVS)
are not ignored.
"""
self.assertFileContains('test/.hidden', 'should be ignored')
self.assertFileContains('test/backup~', 'should be ignored')
self.assertFileContains('test/CVS', 'should be ignored')
class TestBuildStaticDryRun(BuildStaticTestCase):
"""
Test ``--dry-run`` option for ``collectstatic`` management command.
"""
def run_collectstatic(self):
super(TestBuildStaticDryRun, self).run_collectstatic(dry_run=True)
def test_no_files_created(self):
"""
With --dry-run, no files created in destination dir.
"""
self.assertEquals(os.listdir(settings.STATICFILES_ROOT), [])
if sys.platform != 'win32':
class TestBuildStaticLinks(BuildStaticTestCase, TestDefaults):
"""
Test ``--link`` option for ``collectstatic`` management command.
Note that by inheriting ``TestDefaults`` we repeat all
the standard file resolving tests here, to make sure using
``--link`` does not change the file-selection semantics.
"""
def run_collectstatic(self):
super(TestBuildStaticLinks, self).run_collectstatic(link=True)
def test_links_created(self):
"""
With ``--link``, symbolic links are created.
"""
self.failUnless(os.path.islink(os.path.join(settings.STATICFILES_ROOT, 'test.txt')))
class TestServeStatic(StaticFilesTestCase):
"""
Test static asset serving view.
"""
urls = "regressiontests.staticfiles_tests.urls.default"
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.STATICFILES_URL, filepath))
def assertFileContains(self, filepath, text):
self.assertContains(self._response(filepath), text)
def assertFileNotFound(self, filepath):
self.assertEquals(self._response(filepath).status_code, 404)
class TestServeDisabled(TestServeStatic):
"""
    Test that the static file serving view is disabled when DEBUG is False.
"""
def setUp(self):
super(TestServeDisabled, self).setUp()
settings.DEBUG = False
def test_disabled_serving(self):
self.assertRaisesRegexp(ImproperlyConfigured, "The view to serve "
"static files can only be used if the DEBUG setting is True",
self._response, 'test.txt')
class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
"""
    Test static asset serving view with the default URLconf.
"""
pass
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
"""
Test static asset serving view with staticfiles_urlpatterns helper.
"""
urls = "regressiontests.staticfiles_tests.urls.helper"
class TestServeAdminMedia(TestServeStatic):
"""
Test serving media from django.contrib.admin.
"""
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.ADMIN_MEDIA_PREFIX, filepath))
def test_serve_admin_media(self):
self.assertFileContains('css/base.css', 'body')
class FinderTestCase(object):
"""
Base finder test mixin
"""
def test_find_first(self):
src, dst = self.find_first
self.assertEquals(self.finder.find(src), dst)
def test_find_all(self):
src, dst = self.find_all
self.assertEquals(self.finder.find(src, all=True), dst)
class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
"""
Test FileSystemFinder.
"""
def setUp(self):
super(TestFileSystemFinder, self).setUp()
self.finder = finders.FileSystemFinder()
test_file_path = os.path.join(TEST_ROOT, 'project/documents/test/file.txt')
self.find_first = ("test/file.txt", test_file_path)
self.find_all = ("test/file.txt", [test_file_path])
class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
"""
Test AppDirectoriesFinder.
"""
def setUp(self):
super(TestAppDirectoriesFinder, self).setUp()
self.finder = finders.AppDirectoriesFinder()
test_file_path = os.path.join(TEST_ROOT, 'apps/test/static/test/file1.txt')
self.find_first = ("test/file1.txt", test_file_path)
self.find_all = ("test/file1.txt", [test_file_path])
class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
"""
Test DefaultStorageFinder.
"""
def setUp(self):
super(TestDefaultStorageFinder, self).setUp()
self.finder = finders.DefaultStorageFinder(
storage=storage.StaticFilesStorage(location=settings.MEDIA_ROOT))
test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
self.find_first = ("media-file.txt", test_file_path)
self.find_all = ("media-file.txt", [test_file_path])
class TestMiscFinder(TestCase):
"""
A few misc finder tests.
"""
def test_get_finder(self):
self.assertTrue(isinstance(finders.get_finder(
"django.contrib.staticfiles.finders.FileSystemFinder"),
finders.FileSystemFinder))
self.assertRaises(ImproperlyConfigured,
finders.get_finder, "django.contrib.staticfiles.finders.FooBarFinder")
self.assertRaises(ImproperlyConfigured,
finders.get_finder, "foo.bar.FooBarFinder")
class TemplateTagTest(TestCase):
def test_get_staticfiles_prefix(self):
"""
        Test that the get_staticfiles_prefix helper returns the STATICFILES_URL setting.
"""
self.assertEquals(Template(
"{% load staticfiles %}"
"{% get_staticfiles_prefix %}"
).render(Context()), settings.STATICFILES_URL)
def test_get_staticfiles_prefix_with_as(self):
"""
        Test that the get_staticfiles_prefix helper returns the STATICFILES_URL setting.
"""
self.assertEquals(Template(
"{% load staticfiles %}"
"{% get_staticfiles_prefix as staticfiles_prefix %}"
"{{ staticfiles_prefix }}"
).render(Context()), settings.STATICFILES_URL)
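# Illustrative sketch only: a rough, assumed approximation of the "urls.helper"
# URLconf referenced by TestServeStaticWithURLHelper above; the real module lives
# in the test project and may differ. Wrapped in a function so nothing runs at
# import time.
def _example_helper_urlconf():
    from django.conf.urls.defaults import patterns
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    # staticfiles_urlpatterns() appends serve-view patterns for STATICFILES_URL
    # when DEBUG is True, which is what the serving tests above rely on.
    urlpatterns = patterns('')
    urlpatterns += staticfiles_urlpatterns()
    return urlpatterns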
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from resource_management.core.exceptions import Fail
@patch("os.path.isfile", new = MagicMock(return_value=True))
@patch("glob.glob", new = MagicMock(return_value=["one", "two"]))
class TestWebHCatServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
STACK_VERSION = "2.0.6"
CONFIG_OVERRIDES = {"serviceName":"HIVE", "role":"WEBHCAT_SERVER"}
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('Execute', 'cd /var/run/webhcat ; /usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh start',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'},
not_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
user = 'hcat',
)
self.assertNoMoreResources()
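  # The stop tests below all assert the same shutdown sequence: attempt a
  # graceful webhcat_server.sh stop (or dump the recent webhcat logs when the
  # graceful stop fails), force a kill -9 of the recorded pid if the process is
  # still alive, assert the process is gone, and finally delete the pid file.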
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh stop',
user = 'hcat',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client' }
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
@patch("webhcat_service.graceful_stop", new = MagicMock(side_effect=Fail))
def test_stop_graceful_stop_failed(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', "find /var/log/webhcat -maxdepth 1 -type f -name '*' -exec echo '==> {} <==' \\; -exec tail -n 40 {} \\;",
logoutput = True,
ignore_failures = True,
user = 'hcat',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "start",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', 'cd /var/run/webhcat ; /usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh start',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'},
not_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
user = 'hcat',
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh stop',
user = 'hcat',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client' }
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
@patch("webhcat_service.graceful_stop", new = MagicMock(side_effect=Fail))
def test_stop_secured_graceful_stop_failed(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', "find /var/log/webhcat -maxdepth 1 -type f -name '*' -exec echo '==> {} <==' \\; -exec tail -n 40 {} \\;",
logoutput = True,
ignore_failures = True,
user = 'hcat',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def assert_configure_default(self):
self.assertResourceCalled('Directory', '/var/run/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/etc/hive-webhcat/conf',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
cd_access = 'a'
)
self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
owner = 'hcat',
group = 'hadoop',
conf_dir = '/etc/hive-webhcat/conf',
configurations = self.getConfig()['configurations']['webhcat-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['webhcat-site']
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['webhcat-env']['content']),
owner = 'hcat',
group = 'hadoop',
)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/conf',
cd_access = 'a',
create_parents = True
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-log4j.properties',
content = InlineTemplate('log4jproperties\nline2'),
owner = 'hcat',
group = 'hadoop',
mode = 0644,
)
def assert_configure_secured(self):
self.assertResourceCalled('Directory', '/var/run/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/etc/hive-webhcat/conf',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
cd_access = 'a'
)
self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
owner = 'hcat',
group = 'hadoop',
conf_dir = '/etc/hive-webhcat/conf',
configurations = self.getConfig()['configurations']['webhcat-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['webhcat-site']
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['webhcat-env']['content']),
owner = 'hcat',
group = 'hadoop',
)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/conf',
cd_access = 'a',
create_parents = True
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-log4j.properties',
content = InlineTemplate('log4jproperties\nline2'),
owner = 'hcat',
group = 'hadoop',
mode = 0644,
)
def test_pre_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_23(self, call_mock):
import sys
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
json_content['hostLevelParams']['stack_version'] = "2.3"
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertTrue("params" in sys.modules)
self.assertTrue(sys.modules["params"].webhcat_conf_dir is not None)
self.assertTrue("/usr/hdp/current/hive-webhcat/etc/webhcat" == sys.modules["params"].webhcat_conf_dir)
self.assertResourceCalledIgnoreEarlier('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_rolling_restart_configure(self, call_mock):
import sys
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
json_content['hostLevelParams']['stack_version'] = "2.3"
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "configure",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalled('Directory', '/var/run/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755)
self.assertResourceCalled('Directory', '/var/log/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/etc/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
cd_access = 'a',)
self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
owner = 'hcat',
group = 'hadoop',
conf_dir = '/usr/hdp/current/hive-webhcat/etc/webhcat',
configurations = self.getConfig()['configurations']['webhcat-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['webhcat-site'])
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
owner = 'hive',
group = 'hadoop',
conf_dir = '/usr/hdp/2.3.0.0-1234/hive/conf',
configuration_attributes = {u'final': {u'hive.optimize.bucketmapjoin.sortedmerge': u'true',
u'javax.jdo.option.ConnectionDriverName': u'true',
u'javax.jdo.option.ConnectionPassword': u'true'}},
configurations = self.getConfig()['configurations']['hive-site'],
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
conf_dir = '/usr/hdp/2.3.0.0-1234/hadoop/conf',
configuration_attributes = {u'final': {u'yarn.nodemanager.container-executor.class': u'true',
u'yarn.nodemanager.disk-health-checker.min-healthy-disks': u'true',
u'yarn.nodemanager.local-dirs': u'true'}},
configurations = self.getConfig()['configurations']['yarn-site'],
)
self.assertResourceCalled('File', '/usr/hdp/current/hive-webhcat/etc/webhcat/webhcat-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['webhcat-env']['content']),
owner = 'hcat',
group = 'hadoop')
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/etc/webhcat',
cd_access = 'a',
create_parents = True)
self.assertResourceCalled('File', '/usr/hdp/current/hive-webhcat/etc/webhcat/webhcat-log4j.properties',
content = InlineTemplate('log4jproperties\nline2'),
owner = 'hcat',
group = 'hadoop',
mode = 0644)
self.assertNoMoreResources()
|
|
#!/usr/bin/python
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.utils.process"""
import unittest
import tempfile
import shutil
import os
import stat
import time
import select
import signal
from ganeti import constants
from ganeti import utils
from ganeti import errors
import testutils
class TestIsProcessAlive(unittest.TestCase):
"""Testing case for IsProcessAlive"""
def testExists(self):
mypid = os.getpid()
self.assert_(utils.IsProcessAlive(mypid), "can't find myself running")
def testNotExisting(self):
pid_non_existing = os.fork()
if pid_non_existing == 0:
os._exit(0)
elif pid_non_existing < 0:
raise SystemError("can't fork")
os.waitpid(pid_non_existing, 0)
self.assertFalse(utils.IsProcessAlive(pid_non_existing),
"nonexisting process detected")
class TestGetProcStatusPath(unittest.TestCase):
def test(self):
self.assert_("/1234/" in utils.process._GetProcStatusPath(1234))
self.assertNotEqual(utils.process._GetProcStatusPath(1),
utils.process._GetProcStatusPath(2))
class TestIsProcessHandlingSignal(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testParseSigsetT(self):
parse_sigset_t_fn = utils.process._ParseSigsetT
self.assertEqual(len(parse_sigset_t_fn("0")), 0)
self.assertEqual(parse_sigset_t_fn("1"), set([1]))
self.assertEqual(parse_sigset_t_fn("1000a"), set([2, 4, 17]))
self.assertEqual(parse_sigset_t_fn("810002"), set([2, 17, 24, ]))
self.assertEqual(parse_sigset_t_fn("0000000180000202"),
set([2, 10, 32, 33]))
self.assertEqual(parse_sigset_t_fn("0000000180000002"),
set([2, 32, 33]))
self.assertEqual(parse_sigset_t_fn("0000000188000002"),
set([2, 28, 32, 33]))
self.assertEqual(parse_sigset_t_fn("000000004b813efb"),
set([1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 17,
24, 25, 26, 28, 31]))
self.assertEqual(parse_sigset_t_fn("ffffff"), set(range(1, 25)))
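  # Worked example of the decoding checked above: "1000a" is the bitmask 0x1000a,
  # which sets bits 1, 3 and 16 (counting from 0); signal numbers are the bit
  # index plus one, hence the expected set {2, 4, 17}.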
def testGetProcStatusField(self):
for field in ["SigCgt", "Name", "FDSize"]:
for value in ["", "0", "cat", " 1234 KB"]:
pstatus = "\n".join([
"VmPeak: 999 kB",
"%s: %s" % (field, value),
"TracerPid: 0",
])
result = utils.process._GetProcStatusField(pstatus, field)
self.assertEqual(result, value.strip())
def test(self):
sp = utils.PathJoin(self.tmpdir, "status")
utils.WriteFile(sp, data="\n".join([
"Name: bash",
"State: S (sleeping)",
"SleepAVG: 98%",
"Pid: 22250",
"PPid: 10858",
"TracerPid: 0",
"SigBlk: 0000000000010000",
"SigIgn: 0000000000384004",
"SigCgt: 000000004b813efb",
"CapEff: 0000000000000000",
]))
self.assert_(utils.IsProcessHandlingSignal(1234, 10, status_path=sp))
def testNoSigCgt(self):
sp = utils.PathJoin(self.tmpdir, "status")
utils.WriteFile(sp, data="\n".join([
"Name: bash",
]))
self.assertRaises(RuntimeError, utils.IsProcessHandlingSignal,
1234, 10, status_path=sp)
def testNoSuchFile(self):
sp = utils.PathJoin(self.tmpdir, "notexist")
self.assertFalse(utils.IsProcessHandlingSignal(1234, 10, status_path=sp))
@staticmethod
def _TestRealProcess():
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
if utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is handled when it should not be")
signal.signal(signal.SIGUSR1, lambda signum, frame: None)
if not utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is not handled when it should be")
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
if utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is not handled when it should be")
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
if utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is handled when it should not be")
return True
def testRealProcess(self):
self.assert_(utils.RunInSeparateProcess(self._TestRealProcess))
class _PostforkProcessReadyHelper:
"""A helper to use with C{postfork_fn} in RunCmd.
It makes sure a process has reached a certain state by reading from a fifo.
@ivar write_fd: The fd number to write to
"""
def __init__(self, timeout):
"""Initialize the helper.
@param timeout: The time in seconds to wait before giving up
"""
self.timeout = timeout
(self.read_fd, self.write_fd) = os.pipe()
def Ready(self, pid):
"""Waits until the process is ready.
@param pid: The pid of the process
"""
(read_ready, _, _) = select.select([self.read_fd], [], [], self.timeout)
if not read_ready:
# We hit the timeout
raise AssertionError("Timeout %d reached while waiting for process %d"
" to become ready" % (self.timeout, pid))
def Cleanup(self):
"""Cleans up the helper.
"""
os.close(self.read_fd)
os.close(self.write_fd)
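# Illustrative sketch only (an assumed helper, not used by the tests): the
# readiness handshake _PostforkProcessReadyHelper builds on, reduced to the
# plain os.pipe plus select calls imported at the top of this module; the
# timeout value here is arbitrary.
def _readiness_handshake_sketch(timeout=5.0):
  read_fd, write_fd = os.pipe()
  # In RunCmd the forked child would write a byte to write_fd once it is ready;
  # the sketch does it in-process so the example stays self-contained.
  os.write(write_fd, "x")
  ready, _, _ = select.select([read_fd], [], [], timeout)
  os.close(read_fd)
  os.close(write_fd)
  return bool(ready)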
class TestRunCmd(testutils.GanetiTestCase):
"""Testing case for the RunCmd function"""
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.magic = time.ctime() + " ganeti test"
self.fname = self._CreateTempFile()
self.fifo_tmpdir = tempfile.mkdtemp()
self.fifo_file = os.path.join(self.fifo_tmpdir, "ganeti_test_fifo")
os.mkfifo(self.fifo_file)
# If the process is not ready after 20 seconds we have bigger issues
self.proc_ready_helper = _PostforkProcessReadyHelper(20)
def tearDown(self):
self.proc_ready_helper.Cleanup()
shutil.rmtree(self.fifo_tmpdir)
testutils.GanetiTestCase.tearDown(self)
def testOk(self):
"""Test successful exit code"""
result = utils.RunCmd("/bin/sh -c 'exit 0'")
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output, "")
def testFail(self):
"""Test fail exit code"""
result = utils.RunCmd("/bin/sh -c 'exit 1'")
self.assertEqual(result.exit_code, 1)
self.assertEqual(result.output, "")
def testStdout(self):
"""Test standard output"""
cmd = 'echo -n "%s"' % self.magic
result = utils.RunCmd("/bin/sh -c '%s'" % cmd)
self.assertEqual(result.stdout, self.magic)
result = utils.RunCmd("/bin/sh -c '%s'" % cmd, output=self.fname)
self.assertEqual(result.output, "")
self.assertFileContent(self.fname, self.magic)
def testStderr(self):
"""Test standard error"""
cmd = 'echo -n "%s"' % self.magic
result = utils.RunCmd("/bin/sh -c '%s' 1>&2" % cmd)
self.assertEqual(result.stderr, self.magic)
result = utils.RunCmd("/bin/sh -c '%s' 1>&2" % cmd, output=self.fname)
self.assertEqual(result.output, "")
self.assertFileContent(self.fname, self.magic)
def testCombined(self):
"""Test combined output"""
cmd = 'echo -n "A%s"; echo -n "B%s" 1>&2' % (self.magic, self.magic)
expected = "A" + self.magic + "B" + self.magic
result = utils.RunCmd("/bin/sh -c '%s'" % cmd)
self.assertEqual(result.output, expected)
result = utils.RunCmd("/bin/sh -c '%s'" % cmd, output=self.fname)
self.assertEqual(result.output, "")
self.assertFileContent(self.fname, expected)
def testSignal(self):
"""Test signal"""
result = utils.RunCmd(["python", "-c",
"import os; os.kill(os.getpid(), 15)"])
self.assertEqual(result.signal, 15)
self.assertEqual(result.output, "")
def testTimeoutFlagTrue(self):
result = utils.RunCmd(["sleep", "2"], timeout=0.1)
self.assertTrue(result.failed)
self.assertTrue(result.failed_by_timeout)
def testTimeoutFlagFalse(self):
result = utils.RunCmd(["false"], timeout=5)
self.assertTrue(result.failed)
self.assertFalse(result.failed_by_timeout)
def testTimeoutClean(self):
cmd = ("trap 'exit 0' TERM; echo >&%d; read < %s" %
(self.proc_ready_helper.write_fd, self.fifo_file))
result = utils.RunCmd(["/bin/sh", "-c", cmd], timeout=0.2,
noclose_fds=[self.proc_ready_helper.write_fd],
postfork_fn=self.proc_ready_helper.Ready)
self.assertEqual(result.exit_code, 0)
def testTimeoutKill(self):
cmd = ["/bin/sh", "-c", "trap '' TERM; echo >&%d; read < %s" %
(self.proc_ready_helper.write_fd, self.fifo_file)]
timeout = 0.2
(out, err, status, ta) = \
utils.process._RunCmdPipe(cmd, {}, False, "/", False,
timeout, [self.proc_ready_helper.write_fd],
None,
_linger_timeout=0.2,
postfork_fn=self.proc_ready_helper.Ready)
self.assert_(status < 0)
self.assertEqual(-status, signal.SIGKILL)
def testTimeoutOutputAfterTerm(self):
cmd = ("trap 'echo sigtermed; exit 1' TERM; echo >&%d; read < %s" %
(self.proc_ready_helper.write_fd, self.fifo_file))
result = utils.RunCmd(["/bin/sh", "-c", cmd], timeout=0.2,
noclose_fds=[self.proc_ready_helper.write_fd],
postfork_fn=self.proc_ready_helper.Ready)
self.assert_(result.failed)
self.assertEqual(result.stdout, "sigtermed\n")
def testListRun(self):
"""Test list runs"""
result = utils.RunCmd(["true"])
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 0)
result = utils.RunCmd(["/bin/sh", "-c", "exit 1"])
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 1)
result = utils.RunCmd(["echo", "-n", self.magic])
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.stdout, self.magic)
def testFileEmptyOutput(self):
"""Test file output"""
result = utils.RunCmd(["true"], output=self.fname)
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 0)
self.assertFileContent(self.fname, "")
def testLang(self):
"""Test locale environment"""
old_env = os.environ.copy()
try:
os.environ["LANG"] = "en_US.UTF-8"
os.environ["LC_ALL"] = "en_US.UTF-8"
result = utils.RunCmd(["locale"])
for line in result.output.splitlines():
key, value = line.split("=", 1)
# Ignore these variables, they're overridden by LC_ALL
if key == "LANG" or key == "LANGUAGE":
continue
self.failIf(value and value != "C" and value != '"C"',
"Variable %s is set to the invalid value '%s'" % (key, value))
finally:
os.environ = old_env
def testDefaultCwd(self):
"""Test default working directory"""
self.failUnlessEqual(utils.RunCmd(["pwd"]).stdout.strip(), "/")
def testCwd(self):
"""Test default working directory"""
self.failUnlessEqual(utils.RunCmd(["pwd"], cwd="/").stdout.strip(), "/")
self.failUnlessEqual(utils.RunCmd(["pwd"], cwd="/tmp").stdout.strip(),
"/tmp")
cwd = os.getcwd()
self.failUnlessEqual(utils.RunCmd(["pwd"], cwd=cwd).stdout.strip(), cwd)
def testResetEnv(self):
"""Test environment reset functionality"""
self.failUnlessEqual(utils.RunCmd(["env"], reset_env=True).stdout.strip(),
"")
self.failUnlessEqual(utils.RunCmd(["env"], reset_env=True,
env={"FOO": "bar",}).stdout.strip(),
"FOO=bar")
def testNoFork(self):
"""Test that nofork raise an error"""
self.assertFalse(utils.process._no_fork)
utils.DisableFork()
try:
self.assertTrue(utils.process._no_fork)
self.assertRaises(errors.ProgrammerError, utils.RunCmd, ["true"])
finally:
utils.process._no_fork = False
self.assertFalse(utils.process._no_fork)
def testWrongParams(self):
"""Test wrong parameters"""
self.assertRaises(errors.ProgrammerError, utils.RunCmd, ["true"],
output="/dev/null", interactive=True)
def testNocloseFds(self):
"""Test selective fd retention (noclose_fds)"""
temp = open(self.fname, "r+")
try:
temp.write("test")
temp.seek(0)
cmd = "read -u %d; echo $REPLY" % temp.fileno()
result = utils.RunCmd(["/bin/bash", "-c", cmd])
self.assertEqual(result.stdout.strip(), "")
temp.seek(0)
result = utils.RunCmd(["/bin/bash", "-c", cmd],
noclose_fds=[temp.fileno()])
self.assertEqual(result.stdout.strip(), "test")
finally:
temp.close()
def testNoInputRead(self):
testfile = testutils.TestDataFilename("cert1.pem")
result = utils.RunCmd(["cat"], timeout=10.0)
self.assertFalse(result.failed)
self.assertEqual(result.stderr, "")
self.assertEqual(result.stdout, "")
def testInputFileHandle(self):
testfile = testutils.TestDataFilename("cert1.pem")
result = utils.RunCmd(["cat"], input_fd=open(testfile, "r"))
self.assertFalse(result.failed)
self.assertEqual(result.stdout, utils.ReadFile(testfile))
self.assertEqual(result.stderr, "")
def testInputNumericFileDescriptor(self):
testfile = testutils.TestDataFilename("cert2.pem")
fh = open(testfile, "r")
try:
result = utils.RunCmd(["cat"], input_fd=fh.fileno())
finally:
fh.close()
self.assertFalse(result.failed)
self.assertEqual(result.stdout, utils.ReadFile(testfile))
self.assertEqual(result.stderr, "")
def testInputWithCloseFds(self):
testfile = testutils.TestDataFilename("cert1.pem")
temp = open(self.fname, "r+")
try:
temp.write("test283523367")
temp.seek(0)
result = utils.RunCmd(["/bin/bash", "-c",
("cat && read -u %s; echo $REPLY" %
temp.fileno())],
input_fd=open(testfile, "r"),
noclose_fds=[temp.fileno()])
self.assertFalse(result.failed)
self.assertEqual(result.stdout.strip(),
utils.ReadFile(testfile) + "test283523367")
self.assertEqual(result.stderr, "")
finally:
temp.close()
def testOutputAndInteractive(self):
self.assertRaises(errors.ProgrammerError, utils.RunCmd,
[], output=self.fname, interactive=True)
def testOutputAndInput(self):
self.assertRaises(errors.ProgrammerError, utils.RunCmd,
[], output=self.fname, input_fd=open(self.fname))
class TestRunParts(testutils.GanetiTestCase):
"""Testing case for the RunParts function"""
def setUp(self):
self.rundir = tempfile.mkdtemp(prefix="ganeti-test", suffix=".tmp")
def tearDown(self):
shutil.rmtree(self.rundir)
def testEmpty(self):
"""Test on an empty dir"""
self.failUnlessEqual(utils.RunParts(self.rundir, reset_env=True), [])
def testSkipWrongName(self):
"""Test that wrong files are skipped"""
fname = os.path.join(self.rundir, "00test.dot")
utils.WriteFile(fname, data="")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
relname = os.path.basename(fname)
self.failUnlessEqual(utils.RunParts(self.rundir, reset_env=True),
[(relname, constants.RUNPARTS_SKIP, None)])
def testSkipNonExec(self):
"""Test that non executable files are skipped"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="")
relname = os.path.basename(fname)
self.failUnlessEqual(utils.RunParts(self.rundir, reset_env=True),
[(relname, constants.RUNPARTS_SKIP, None)])
def testError(self):
"""Test error on a broken executable"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
(relname, status, error) = utils.RunParts(self.rundir, reset_env=True)[0]
self.failUnlessEqual(relname, os.path.basename(fname))
self.failUnlessEqual(status, constants.RUNPARTS_ERR)
self.failUnless(error)
def testSorted(self):
"""Test executions are sorted"""
files = []
files.append(os.path.join(self.rundir, "64test"))
files.append(os.path.join(self.rundir, "00test"))
files.append(os.path.join(self.rundir, "42test"))
for fname in files:
utils.WriteFile(fname, data="")
results = utils.RunParts(self.rundir, reset_env=True)
for fname in sorted(files):
self.failUnlessEqual(os.path.basename(fname), results.pop(0)[0])
def testOk(self):
"""Test correct execution"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="#!/bin/sh\n\necho -n ciao")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
(relname, status, runresult) = \
utils.RunParts(self.rundir, reset_env=True)[0]
self.failUnlessEqual(relname, os.path.basename(fname))
self.failUnlessEqual(status, constants.RUNPARTS_RUN)
self.failUnlessEqual(runresult.stdout, "ciao")
def testRunFail(self):
"""Test correct execution, with run failure"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="#!/bin/sh\n\nexit 1")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
(relname, status, runresult) = \
utils.RunParts(self.rundir, reset_env=True)[0]
self.failUnlessEqual(relname, os.path.basename(fname))
self.failUnlessEqual(status, constants.RUNPARTS_RUN)
self.failUnlessEqual(runresult.exit_code, 1)
self.failUnless(runresult.failed)
def testRunMix(self):
files = []
files.append(os.path.join(self.rundir, "00test"))
files.append(os.path.join(self.rundir, "42test"))
files.append(os.path.join(self.rundir, "64test"))
files.append(os.path.join(self.rundir, "99test"))
files.sort()
# 1st has errors in execution
utils.WriteFile(files[0], data="#!/bin/sh\n\nexit 1")
os.chmod(files[0], stat.S_IREAD | stat.S_IEXEC)
# 2nd is skipped
utils.WriteFile(files[1], data="")
# 3rd cannot execute properly
utils.WriteFile(files[2], data="")
os.chmod(files[2], stat.S_IREAD | stat.S_IEXEC)
# 4th execs
utils.WriteFile(files[3], data="#!/bin/sh\n\necho -n ciao")
os.chmod(files[3], stat.S_IREAD | stat.S_IEXEC)
results = utils.RunParts(self.rundir, reset_env=True)
(relname, status, runresult) = results[0]
self.failUnlessEqual(relname, os.path.basename(files[0]))
self.failUnlessEqual(status, constants.RUNPARTS_RUN)
self.failUnlessEqual(runresult.exit_code, 1)
self.failUnless(runresult.failed)
(relname, status, runresult) = results[1]
self.failUnlessEqual(relname, os.path.basename(files[1]))
self.failUnlessEqual(status, constants.RUNPARTS_SKIP)
self.failUnlessEqual(runresult, None)
(relname, status, runresult) = results[2]
self.failUnlessEqual(relname, os.path.basename(files[2]))
self.failUnlessEqual(status, constants.RUNPARTS_ERR)
self.failUnless(runresult)
(relname, status, runresult) = results[3]
self.failUnlessEqual(relname, os.path.basename(files[3]))
self.failUnlessEqual(status, constants.RUNPARTS_RUN)
self.failUnlessEqual(runresult.output, "ciao")
self.failUnlessEqual(runresult.exit_code, 0)
self.failUnless(not runresult.failed)
def testMissingDirectory(self):
nosuchdir = utils.PathJoin(self.rundir, "no/such/directory")
self.assertEqual(utils.RunParts(nosuchdir), [])
class TestStartDaemon(testutils.GanetiTestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="ganeti-test")
self.tmpfile = os.path.join(self.tmpdir, "test")
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testShell(self):
utils.StartDaemon("echo Hello World > %s" % self.tmpfile)
self._wait(self.tmpfile, 60.0, "Hello World")
def testShellOutput(self):
utils.StartDaemon("echo Hello World", output=self.tmpfile)
self._wait(self.tmpfile, 60.0, "Hello World")
def testNoShellNoOutput(self):
utils.StartDaemon(["pwd"])
def testNoShellNoOutputTouch(self):
testfile = os.path.join(self.tmpdir, "check")
self.failIf(os.path.exists(testfile))
utils.StartDaemon(["touch", testfile])
self._wait(testfile, 60.0, "")
def testNoShellOutput(self):
utils.StartDaemon(["pwd"], output=self.tmpfile)
self._wait(self.tmpfile, 60.0, "/")
def testNoShellOutputCwd(self):
utils.StartDaemon(["pwd"], output=self.tmpfile, cwd=os.getcwd())
self._wait(self.tmpfile, 60.0, os.getcwd())
def testShellEnv(self):
utils.StartDaemon("echo \"$GNT_TEST_VAR\"", output=self.tmpfile,
env={ "GNT_TEST_VAR": "Hello World", })
self._wait(self.tmpfile, 60.0, "Hello World")
def testNoShellEnv(self):
utils.StartDaemon(["printenv", "GNT_TEST_VAR"], output=self.tmpfile,
env={ "GNT_TEST_VAR": "Hello World", })
self._wait(self.tmpfile, 60.0, "Hello World")
def testOutputFd(self):
fd = os.open(self.tmpfile, os.O_WRONLY | os.O_CREAT)
try:
utils.StartDaemon(["pwd"], output_fd=fd, cwd=os.getcwd())
finally:
os.close(fd)
self._wait(self.tmpfile, 60.0, os.getcwd())
def testPid(self):
pid = utils.StartDaemon("echo $$ > %s" % self.tmpfile)
self._wait(self.tmpfile, 60.0, str(pid))
def testPidFile(self):
pidfile = os.path.join(self.tmpdir, "pid")
checkfile = os.path.join(self.tmpdir, "abort")
pid = utils.StartDaemon("while sleep 5; do :; done", pidfile=pidfile,
output=self.tmpfile)
try:
fd = os.open(pidfile, os.O_RDONLY)
try:
# Check file is locked
self.assertRaises(errors.LockError, utils.LockFile, fd)
pidtext = os.read(fd, 100)
finally:
os.close(fd)
self.assertEqual(int(pidtext.strip()), pid)
self.assert_(utils.IsProcessAlive(pid))
finally:
# No matter what happens, kill daemon
utils.KillProcess(pid, timeout=5.0, waitpid=False)
self.failIf(utils.IsProcessAlive(pid))
self.assertEqual(utils.ReadFile(self.tmpfile), "")
def _wait(self, path, timeout, expected):
# Due to the asynchronous nature of daemon processes, polling is necessary.
# A timeout makes sure the test doesn't hang forever.
def _CheckFile():
if not (os.path.isfile(path) and
utils.ReadFile(path).strip() == expected):
raise utils.RetryAgain()
try:
utils.Retry(_CheckFile, (0.01, 1.5, 1.0), timeout)
except utils.RetryTimeout:
self.fail("Apparently the daemon didn't run in %s seconds and/or"
" didn't write the correct output" % timeout)
def testError(self):
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"])
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
output=os.path.join(self.tmpdir, "DIR/NOT/EXIST"))
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
cwd=os.path.join(self.tmpdir, "DIR/NOT/EXIST"))
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
output=os.path.join(self.tmpdir, "DIR/NOT/EXIST"))
fd = os.open(self.tmpfile, os.O_WRONLY | os.O_CREAT)
try:
self.assertRaises(errors.ProgrammerError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
output=self.tmpfile, output_fd=fd)
finally:
os.close(fd)
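# Illustrative sketch (not part of the original test suite): the polling pattern
# used by TestStartDaemon._wait above is assumed to generalize to any condition
# check. The callback raises utils.RetryAgain() to keep polling, and
# utils.Retry() gives up with utils.RetryTimeout once the timeout expires.
def _ExampleWaitForFile(path, timeout=10.0):
  def _Check():
    if not os.path.isfile(path):
      raise utils.RetryAgain()
  try:
    utils.Retry(_Check, (0.01, 1.5, 1.0), timeout)
    return True
  except utils.RetryTimeout:
    return False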
class RunInSeparateProcess(unittest.TestCase):
def test(self):
for exp in [True, False]:
def _child():
return exp
self.assertEqual(exp, utils.RunInSeparateProcess(_child))
def testArgs(self):
for arg in [0, 1, 999, "Hello World", (1, 2, 3)]:
def _child(carg1, carg2):
return carg1 == "Foo" and carg2 == arg
self.assert_(utils.RunInSeparateProcess(_child, "Foo", arg))
def testPid(self):
parent_pid = os.getpid()
def _check():
return os.getpid() == parent_pid
self.failIf(utils.RunInSeparateProcess(_check))
def testSignal(self):
def _kill():
os.kill(os.getpid(), signal.SIGTERM)
self.assertRaises(errors.GenericError,
utils.RunInSeparateProcess, _kill)
def testException(self):
def _exc():
raise errors.GenericError("This is a test")
self.assertRaises(errors.GenericError,
utils.RunInSeparateProcess, _exc)
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating instances."""
import argparse
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import csek_utils
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute import property_selector
from googlecloudsdk.api_lib.compute import resource_specs
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute import zone_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
DETAILED_HELP = {
'DESCRIPTION': """\
*{command}* facilitates the creation of Google Compute Engine
virtual machines. For example, running:
$ {command} example-instance-1 example-instance-2 example-instance-3 --zone us-central1-a
will create three instances called `example-instance-1`,
`example-instance-2`, and `example-instance-3` in the
`us-central1-a` zone.
Once an instance is in the RUNNING state and the system begins to boot,
instance creation is considered finished, and the command returns
a list of the new virtual machines. Note that you usually cannot log
into a new instance until it finishes booting. Check the progress of an
instance using `gcloud compute instances get-serial-port-output`.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES': """\
To create an instance with the latest ``Red Hat Enterprise Linux
7'' image available, run:
$ {command} example-instance --image-family rhel-7 --image-project rhel-cloud --zone us-central1-a
""",
}
def _CommonArgs(parser, multiple_network_interface_cards, release_track,
support_alias_ip_ranges, support_public_dns,
support_network_tier,
enable_regional=False):
"""Register parser args common to all tracks."""
metadata_utils.AddMetadataArgs(parser)
instances_flags.AddDiskArgs(parser, enable_regional)
if release_track in [base.ReleaseTrack.ALPHA]:
instances_flags.AddCreateDiskArgs(parser)
instances_flags.AddExtendedMachineTypeArgs(parser)
if release_track in [base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA]:
instances_flags.AddAcceleratorArgs(parser)
instances_flags.AddLocalSsdArgs(parser)
instances_flags.AddCanIpForwardArgs(parser)
instances_flags.AddAddressArgs(
parser, instances=True,
multiple_network_interface_cards=multiple_network_interface_cards,
support_alias_ip_ranges=support_alias_ip_ranges,
support_network_tier=support_network_tier)
instances_flags.AddMachineTypeArgs(parser)
instances_flags.AddMaintenancePolicyArgs(parser)
instances_flags.AddNoRestartOnFailureArgs(parser)
instances_flags.AddPreemptibleVmArgs(parser)
instances_flags.AddServiceAccountAndScopeArgs(parser, False)
instances_flags.AddTagsArgs(parser)
instances_flags.AddCustomMachineTypeArgs(parser)
instances_flags.AddNetworkArgs(parser)
instances_flags.AddPrivateNetworkIpArgs(parser)
instances_flags.AddImageArgs(parser)
if support_public_dns:
instances_flags.AddPublicDnsArgs(parser, instance=True)
if support_network_tier:
instances_flags.AddNetworkTierArgs(parser, instance=True)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
instances_flags.INSTANCES_ARG.AddArgument(parser)
csek_utils.AddCsekKeyArgs(parser)
# TODO(b/33434068) Refactor away ImageExpander and ZoneResourceFetcher
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
"""Create Google Compute Engine virtual machine instances."""
_support_public_dns = False
_support_network_tier = False
def __init__(self, *args, **kwargs):
super(Create, self).__init__(*args, **kwargs)
self.__resource_spec = None
self._compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
@classmethod
def Args(cls, parser):
_CommonArgs(parser, multiple_network_interface_cards=False,
release_track=base.ReleaseTrack.GA,
support_alias_ip_ranges=False,
support_public_dns=cls._support_public_dns,
support_network_tier=cls._support_network_tier)
@property
def resource_type(self):
return 'instances'
@property
def compute_client(self):
return self._compute_holder.client
@property
def messages(self):
return self.compute_client.messages
@property
def compute(self):
return self.compute_client.apitools_client
@property
def project(self):
return properties.VALUES.core.project.Get(required=True)
@property
def resources(self):
return self._compute_holder.resources
# The absence of any of these properties triggers an exception in tests.
@property
def http(self):
return self.compute.http
@property
def batch_url(self):
return self.compute_client.batch_url
@property
def _resource_spec(self):
if self.__resource_spec is None:
# Constructing the spec can be potentially expensive (e.g.,
# generating the set of valid fields from the protobuf message),
# so it is built lazily and cached for subsequent accesses.
self.__resource_spec = resource_specs.GetSpec(
self.resource_type, self.messages, self.compute_client.api_version)
return self.__resource_spec
@property
def transformations(self):
if self._resource_spec:
return self._resource_spec.transformations
else:
return None
def Collection(self):
return 'compute.instances'
def Format(self, args):
return self.ListFormat(args)
def _CreateRequests(self, args):
instances_flags.ValidateDiskFlags(args)
instances_flags.ValidateLocalSsdFlags(args)
instances_flags.ValidateNicFlags(args)
instances_flags.ValidateServiceAccountAndScopeArgs(args)
instances_flags.ValidateAcceleratorArgs(args)
# This feature is only exposed in alpha/beta
allow_rsa_encrypted = self.ReleaseTrack() in [base.ReleaseTrack.ALPHA,
base.ReleaseTrack.BETA]
self.csek_keys = csek_utils.CsekKeyStore.FromArgs(args, allow_rsa_encrypted)
scheduling = instance_utils.CreateSchedulingMessage(
messages=self.messages,
maintenance_policy=args.maintenance_policy,
preemptible=args.preemptible,
restart_on_failure=args.restart_on_failure)
if args.tags:
tags = self.messages.Tags(items=args.tags)
else:
tags = None
metadata = metadata_utils.ConstructMetadataMessage(
self.messages,
metadata=args.metadata,
metadata_from_file=args.metadata_from_file)
# If the user already provided an initial Windows password and
# username through metadata, then there is no need to check
# whether the image or the boot disk is Windows.
boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)
instance_refs = instances_flags.INSTANCES_ARG.ResolveAsResource(
args, self.resources, scope_lister=flags.GetDefaultScopeLister(
self.compute_client, self.project))
# Check if the zone is deprecated or has maintenance coming.
zone_resource_fetcher = zone_utils.ZoneResourceFetcher(self.compute_client)
zone_resource_fetcher.WarnForZonalCreation(instance_refs)
network_interface_arg = getattr(args, 'network_interface', None)
if network_interface_arg:
network_interfaces = instance_utils.CreateNetworkInterfaceMessages(
resources=self.resources,
compute_client=self.compute_client,
network_interface_arg=network_interface_arg,
instance_refs=instance_refs,
support_network_tier=self._support_network_tier)
else:
if self._support_public_dns is True:
instances_flags.ValidatePublicDnsFlags(args)
network_tier = getattr(args, 'network_tier', None)
network_interfaces = [
instance_utils.CreateNetworkInterfaceMessage(
resources=self.resources,
compute_client=self.compute_client,
network=args.network,
subnet=args.subnet,
private_network_ip=args.private_network_ip,
no_address=args.no_address,
address=args.address,
instance_refs=instance_refs,
network_tier=network_tier,
no_public_dns=getattr(args, 'no_public_dns', None),
public_dns=getattr(args, 'public_dns', None),
no_public_ptr=getattr(args, 'no_public_ptr', None),
public_ptr=getattr(args, 'public_ptr', None),
no_public_ptr_domain=getattr(args, 'no_public_ptr_domain', None),
public_ptr_domain=getattr(args, 'public_ptr_domain', None))
]
machine_type_uris = instance_utils.CreateMachineTypeUris(
resources=self.resources,
compute_client=self.compute_client,
project=self.project,
machine_type=args.machine_type,
custom_cpu=args.custom_cpu,
custom_memory=args.custom_memory,
ext=getattr(args, 'custom_extensions', None),
instance_refs=instance_refs)
create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk or [])
if create_boot_disk:
image_expander = image_utils.ImageExpander(self.compute_client,
self.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=self.project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project,
return_image_resource=False)
else:
image_uri = None
# A list of lists where the element at index i contains a list of
# disk messages that should be set for the instance at index i.
disks_messages = []
# A mapping of zone to boot disk references for all existing boot
# disks that are being attached.
# TODO(user): Simplify this once resources.Resource becomes
# hashable.
existing_boot_disks = {}
for instance_ref in instance_refs:
persistent_disks, boot_disk_ref = (
instance_utils.CreatePersistentAttachedDiskMessages(
self.resources, self.compute_client, self.csek_keys,
args.disk or [], instance_ref))
persistent_create_disks = (
instance_utils.CreatePersistentCreateDiskMessages(
self,
self.compute_client,
self.resources,
self.csek_keys,
getattr(args, 'create_disk', []),
instance_ref))
local_ssds = []
for x in args.local_ssd or []:
local_ssds.append(
instance_utils.CreateLocalSsdMessage(
self.resources,
self.messages,
x.get('device-name'),
x.get('interface'),
instance_ref.zone)
)
if create_boot_disk:
boot_disk = instance_utils.CreateDefaultBootAttachedDiskMessage(
self.compute_client, self.resources,
disk_type=args.boot_disk_type,
disk_device_name=args.boot_disk_device_name,
disk_auto_delete=args.boot_disk_auto_delete,
disk_size_gb=boot_disk_size_gb,
require_csek_key_create=(
args.require_csek_key_create if self.csek_keys else None),
image_uri=image_uri,
instance_ref=instance_ref,
csek_keys=self.csek_keys)
persistent_disks = [boot_disk] + persistent_disks
else:
existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
disks_messages.append(persistent_disks + persistent_create_disks +
local_ssds)
accelerator_args = getattr(args, 'accelerator', None)
project_to_sa = {}
requests = []
for instance_ref, machine_type_uri, disks in zip(
instance_refs, machine_type_uris, disks_messages):
if instance_ref.project not in project_to_sa:
scopes = None
if not args.no_scopes and not args.scopes:
# User didn't provide any input on scopes. If project has no default
# service account then we want to create a VM with no scopes
request = (self.compute.projects,
'Get',
self.messages.ComputeProjectsGetRequest(
project=instance_ref.project))
errors = []
result = self.compute_client.MakeRequests([request], errors)
if not errors:
if not result[0].defaultServiceAccount:
scopes = []
log.status.Print(
'There is no default service account for project {}. '
'Instance {} will not have scopes.'.format(
instance_ref.project, instance_ref.Name()))
if scopes is None:
scopes = [] if args.no_scopes else args.scopes
if args.no_service_account:
service_account = None
else:
service_account = args.service_account
service_accounts = instance_utils.CreateServiceAccountMessages(
messages=self.messages,
scopes=scopes,
service_account=service_account)
project_to_sa[instance_ref.project] = service_accounts
instance = self.messages.Instance(
canIpForward=args.can_ip_forward,
disks=disks,
description=args.description,
machineType=machine_type_uri,
metadata=metadata,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=project_to_sa[instance_ref.project],
scheduling=scheduling,
tags=tags)
if getattr(args, 'min_cpu_platform', None):
instance.minCpuPlatform = args.min_cpu_platform
if accelerator_args:
accelerator_type_name = accelerator_args['type']
accelerator_type_ref = self.resources.Parse(
accelerator_type_name,
collection='compute.acceleratorTypes',
params={'project': instance_ref.project,
'zone': instance_ref.zone})
# Accelerator count defaults to 1.
accelerator_count = int(accelerator_args.get('count', 1))
accelerators = instance_utils.CreateAcceleratorConfigMessages(
self.compute_client.messages, accelerator_type_ref,
accelerator_count)
instance.guestAccelerators = accelerators
request = self.messages.ComputeInstancesInsertRequest(
instance=instance,
project=instance_ref.project,
zone=instance_ref.zone)
sole_tenancy_host_arg = getattr(args, 'sole_tenancy_host', None)
if sole_tenancy_host_arg:
sole_tenancy_host_ref = self.resources.Parse(
sole_tenancy_host_arg, collection='compute.hosts',
params={'zone': instance_ref.zone})
request.instance.host = sole_tenancy_host_ref.SelfLink()
requests.append((self.compute.instances, 'Insert', request))
return requests
def Run(self, args):
errors = []
requests = self._CreateRequests(args)
resource_list = self.compute_client.MakeRequests(requests, errors)
# Change the machine type URI to just the machine type name.
resource_list = lister.ProcessResults(
resources=resource_list,
field_selector=property_selector.PropertySelector(
properties=None,
transformations=self.transformations))
if errors:
utils.RaiseToolException(errors)
return resource_list
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(Create):
"""Create Google Compute Engine virtual machine instances."""
_support_public_dns = False
_support_network_tier = False
@classmethod
def Args(cls, parser):
_CommonArgs(parser, multiple_network_interface_cards=False,
release_track=base.ReleaseTrack.BETA,
support_alias_ip_ranges=False,
support_public_dns=cls._support_public_dns,
support_network_tier=cls._support_network_tier)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(Create):
"""Create Google Compute Engine virtual machine instances."""
_support_public_dns = True
_support_network_tier = True
@classmethod
def Args(cls, parser):
parser.add_argument('--sole-tenancy-host', help=argparse.SUPPRESS)
_CommonArgs(parser, multiple_network_interface_cards=True,
release_track=base.ReleaseTrack.ALPHA,
support_alias_ip_ranges=True,
support_public_dns=cls._support_public_dns,
support_network_tier=cls._support_network_tier,
enable_regional=True)
instances_flags.AddMinCpuPlatformArgs(parser)
Create.detailed_help = DETAILED_HELP
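# Hypothetical sketch, not part of the original command: each entry appended to
# the `requests` list in _CreateRequests above is a (service, verb, request)
# tuple that compute_client.MakeRequests() executes as a batch. Given a command
# instance `cmd`, an already-built `instance` message, and a resolved
# `instance_ref`, a single insert request could be assembled like this:
def _ExampleSingleInsertRequest(cmd, instance, instance_ref):
  request = cmd.messages.ComputeInstancesInsertRequest(
      instance=instance,
      project=instance_ref.project,
      zone=instance_ref.zone)
  return [(cmd.compute.instances, 'Insert', request)]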
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import inspect
import logging
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
from quantum.openstack.common import cfg
from quantum.openstack.common import excutils
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import local
from quantum.openstack.common.rpc import common as rpc_common
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
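# Illustrative sketch, not part of the original module: get_connection_pool()
# expects the connection class to expose a class-level `pool` attribute that
# starts out as None (as the impl_kombu/impl_qpid Connection classes do). The
# hypothetical class below shows only the minimal surface that the pool, and the
# ConnectionContext wrapper below, rely on here.
class _ExampleConnection(object):
    pool = None

    def __init__(self, conf, server_params=None):
        self.conf = conf

    def reset(self):
        pass

    def close(self):
        pass

# A pool would then be obtained with:
#   pool = get_connection_pool(conf, _ExampleConnection)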
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the caller of
create_connection(). This is essentially a wrapper around
Connection that supports 'with'. It can also return a new
Connection, or one from a pool. The class also catches the case
where an instance is about to be deleted, so that Connections can
be returned to the pool on exceptions and the like without making
the caller responsible for catching them. If possible, the wrapper
makes sure to return the connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool"""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self"""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance"""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
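# Illustrative sketch, not part of the original module: ConnectionContext is
# meant to be used as a context manager so the wrapped connection is returned
# to the pool (or closed) automatically; topic_send() is proxied to the real
# connection through __getattr__ above.
def _example_send_on_topic(conf, connection_pool, topic, msg):
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, msg)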
def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
ending=False):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
conn.direct_send(msg_id, msg)
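# Illustrative sketch, not part of the original module: as the docstring above
# notes, `failure` must be a sys.exc_info() tuple, which
# rpc_common.serialize_remote_exception() turns into a wire-safe payload.
def _example_reply_with_failure(conf, msg_id, connection_pool):
    try:
        raise ValueError("something went wrong")
    except ValueError:
        msg_reply(conf, msg_id, connection_pool,
                  failure=sys.exc_info(), ending=True)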
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None):
if self.msg_id:
msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
ending)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
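# Illustrative sketch, not part of the original module: pack_context() prefixes
# every context key with '_context_' and unpack_context() strips that prefix
# again (and pops '_msg_id'), so the two are assumed to round-trip like this:
def _example_context_roundtrip(conf, context):
    msg = {'method': 'echo', 'args': {'value': 42},
           '_msg_id': uuid.uuid4().hex}
    pack_context(msg, context)         # adds the '_context_*' keys
    return unpack_context(conf, msg)   # rebuilds an RpcContext from them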
class ProxyCallback(object):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
self.proxy = proxy
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version', None)
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method, args)
def _process_data(self, ctxt, version, method, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, **args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except Exception as e:
LOG.exception('Exception during message handling')
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
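# Illustrative sketch, not part of the original module: a message handed to
# ProxyCallback is assumed to carry the keys shown in the docstring above, plus
# the '_msg_id' and '_context_*' keys added on the sending side by
# pack_context(); the values here are purely hypothetical.
_EXAMPLE_INBOUND_MESSAGE = {
    'method': 'echo',
    'args': {'value': 42},
    'version': '1.0',
    '_msg_id': '9c2a51d8c7364dd1b3909f63a0ba57d5',  # only set for call/multicall
    '_context_user_id': 'some-user',  # one '_context_*' key per context field
}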
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
self._iterator = connection.iterconsume(timeout=timeout or
conf.rpc_response_timeout)
self._result = None
self._done = False
self._got_ending = False
self._conf = conf
def done(self):
if self._done:
return
self._done = True
self._iterator.close()
self._iterator = None
self._connection.close()
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
self._result = data['result']
def __iter__(self):
"""Return a result until we get a 'None' response from consumer"""
if self._done:
raise StopIteration
while True:
try:
self._iterator.next()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
result = self._result
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection"""
return ConnectionContext(conf, connection_pool, pooled=not new)
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
LOG.debug(_('Making asynchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
pack_context(msg, context)
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
conn.topic_send(topic, msg)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
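# Illustrative sketch, not part of the original module: multicall() returns an
# iterator that yields every reply until the remote side sends the 'ending'
# marker, while call() above simply drains that iterator and keeps the last
# value.
def _example_consume_multicall(conf, context, topic, msg, timeout, pool):
    results = []
    for reply in multicall(conf, context, topic, msg, timeout, pool):
        results.append(reply)
    return results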
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, msg)
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, msg)
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, msg)
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, msg)
def notify(conf, context, topic, msg, connection_pool):
"""Sends a notification event on a topic."""
event_type = msg.get('event_type')
LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals())
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
try:
return conf.control_exchange
except cfg.NoSuchOptError:
return 'openstack'
|
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh
from mmdet.core.bbox.samplers import PseudoSampler
from ..builder import HEADS
from .cascade_roi_head import CascadeRoIHead
@HEADS.register_module()
class SparseRoIHead(CascadeRoIHead):
r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_
Args:
num_stages (int): Number of stages in the whole iterative process.
Defaults to 6.
stage_loss_weights (Tuple[float]): The loss
weight of each stage. By default all stages have
the same weight 1.
bbox_roi_extractor (dict): Config of box roi extractor.
mask_roi_extractor (dict): Config of mask roi extractor.
bbox_head (dict): Config of box head.
mask_head (dict): Config of mask head.
train_cfg (dict, optional): Configuration information in train stage.
Defaults to None.
test_cfg (dict, optional): Configuration information in test stage.
Defaults to None.
pretrained (str, optional): Path to the pretrained model. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_stages=6,
stage_loss_weights=(1, 1, 1, 1, 1, 1),
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_roi_extractor=None,
bbox_head=dict(
type='DIIHead',
num_classes=80,
num_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
hidden_channels=256,
dropout=0.0,
roi_feat_size=7,
ffn_act_cfg=dict(type='ReLU', inplace=True)),
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert len(stage_loss_weights) == num_stages
self.num_stages = num_stages
self.stage_loss_weights = stage_loss_weights
self.proposal_feature_channel = proposal_feature_channel
super(SparseRoIHead, self).__init__(
num_stages,
stage_loss_weights,
bbox_roi_extractor=bbox_roi_extractor,
mask_roi_extractor=mask_roi_extractor,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
# train_cfg would be None when running test.py
if train_cfg is not None:
for stage in range(num_stages):
assert isinstance(self.bbox_sampler[stage], PseudoSampler), \
'Sparse R-CNN and QueryInst only support `PseudoSampler`'
def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
"""Box head forward function used in both training and testing. Returns
all regression and classification results and an intermediate feature.
Args:
stage (int): The index of current stage in
iterative process.
x (List[Tensor]): List of FPN features
rois (Tensor): RoIs of the whole batch, with shape (num_proposal, 5);
the last dimension represents (img_index, x1, y1, x2, y2).
object_feats (Tensor): The object feature extracted from
the previous stage.
img_metas (dict): meta information of images.
Returns:
dict[str, Tensor]: a dictionary of bbox head outputs,
containing the following results:
- cls_score (Tensor): The score of each class, has
shape (batch_size, num_proposals, num_classes)
when use focal loss or
(batch_size, num_proposals, num_classes+1)
otherwise.
- decode_bbox_pred (Tensor): The regression results
with shape (batch_size, num_proposal, 4).
The last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- object_feats (Tensor): The object feature extracted
from current stage
- detach_cls_score_list (list[Tensor]): The detached
classification results, length is batch_size, and
each tensor has shape (num_proposal, num_classes).
- detach_proposal_list (list[tensor]): The detached
regression results, length is batch_size, and each
tensor has shape (num_proposal, 4). The last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
"""
num_imgs = len(img_metas)
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
cls_score, bbox_pred, object_feats, attn_feats = bbox_head(
bbox_feats, object_feats)
proposal_list = self.bbox_head[stage].refine_bboxes(
rois,
rois.new_zeros(len(rois)), # dummy arg
bbox_pred.view(-1, bbox_pred.size(-1)),
[rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)],
img_metas)
bbox_results = dict(
cls_score=cls_score,
decode_bbox_pred=torch.cat(proposal_list),
object_feats=object_feats,
attn_feats=attn_feats,
# detach then use it in label assign
detach_cls_score_list=[
cls_score[i].detach() for i in range(num_imgs)
],
detach_proposal_list=[item.detach() for item in proposal_list])
return bbox_results
def _mask_forward(self, stage, x, rois, attn_feats):
"""Mask head forward function used in both training and testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
mask_pred = mask_head(mask_feats, attn_feats)
mask_results = dict(mask_pred=mask_pred)
return mask_results
def _mask_forward_train(self, stage, x, attn_feats, sampling_results,
gt_masks, rcnn_train_cfg):
"""Run forward function and calculate loss for mask head in
training."""
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
attn_feats = torch.cat([
feats[res.pos_inds]
for (feats, res) in zip(attn_feats, sampling_results)
])
mask_results = self._mask_forward(stage, x, pos_rois, attn_feats)
mask_targets = self.mask_head[stage].get_targets(
sampling_results, gt_masks, rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask)
return mask_results
def forward_train(self,
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
imgs_whwh=None,
gt_masks=None):
"""Forward function in training stage.
Args:
x (list[Tensor]): list of multi-level img features.
proposal_boxes (Tensor): Decoded proposal bboxes, has shape
(batch_size, num_proposals, 4)
proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel)
img_metas (list[dict]): list of image info dict where
each dict has: 'img_shape', 'scale_factor', 'flip',
and may also contain 'filename', 'ori_shape',
'pad_shape', and 'img_norm_cfg'. For details on the
values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
the dimension means
[img_width,img_height, img_width, img_height].
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components of all stages.
"""
num_imgs = len(img_metas)
num_proposals = proposal_boxes.size(1)
imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1)
all_stage_bbox_results = []
proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
object_feats = proposal_features
all_stage_loss = {}
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
img_metas)
all_stage_bbox_results.append(bbox_results)
if gt_bboxes_ignore is None:
# TODO support ignore
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
cls_pred_list = bbox_results['detach_cls_score_list']
proposal_list = bbox_results['detach_proposal_list']
for i in range(num_imgs):
normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /
imgs_whwh[i])
assign_result = self.bbox_assigner[stage].assign(
normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],
gt_labels[i], img_metas[i])
sampling_result = self.bbox_sampler[stage].sample(
assign_result, proposal_list[i], gt_bboxes[i])
sampling_results.append(sampling_result)
bbox_targets = self.bbox_head[stage].get_targets(
sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage],
True)
cls_score = bbox_results['cls_score']
decode_bbox_pred = bbox_results['decode_bbox_pred']
single_stage_loss = self.bbox_head[stage].loss(
cls_score.view(-1, cls_score.size(-1)),
decode_bbox_pred.view(-1, 4),
*bbox_targets,
imgs_whwh=imgs_whwh)
if self.with_mask:
mask_results = self._mask_forward_train(
stage, x, bbox_results['attn_feats'], sampling_results,
gt_masks, self.train_cfg[stage])
single_stage_loss['loss_mask'] = mask_results['loss_mask']
for key, value in single_stage_loss.items():
all_stage_loss[f'stage{stage}_{key}'] = value * \
self.stage_loss_weights[stage]
object_feats = bbox_results['object_feats']
return all_stage_loss
def simple_test(self,
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh,
rescale=False):
"""Test without augmentation.
Args:
x (list[Tensor]): list of multi-level img features.
proposal_boxes (Tensor): Decoded proposal bboxes, has shape
(batch_size, num_proposals, 4)
proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel)
img_metas (dict): meta information of images.
imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
the dimension means
[img_width,img_height, img_width, img_height].
rescale (bool): If True, return boxes in original image
space. Defaults to False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has a mask branch,
it is a list[tuple] that contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
# Decode initial proposals
num_imgs = len(img_metas)
proposal_list = [proposal_boxes[i] for i in range(num_imgs)]
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
object_feats = proposal_features
if all([proposal.shape[0] == 0 for proposal in proposal_list]):
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for i in range(self.bbox_head[-1].num_classes)
]] * num_imgs
return bbox_results
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
img_metas)
object_feats = bbox_results['object_feats']
cls_score = bbox_results['cls_score']
proposal_list = bbox_results['detach_proposal_list']
if self.with_mask:
rois = bbox2roi(proposal_list)
mask_results = self._mask_forward(stage, x, rois,
bbox_results['attn_feats'])
mask_results['mask_pred'] = mask_results['mask_pred'].reshape(
num_imgs, -1, *mask_results['mask_pred'].size()[1:])
num_classes = self.bbox_head[-1].num_classes
det_bboxes = []
det_labels = []
if self.bbox_head[-1].loss_cls.use_sigmoid:
cls_score = cls_score.sigmoid()
else:
cls_score = cls_score.softmax(-1)[..., :-1]
for img_id in range(num_imgs):
cls_score_per_img = cls_score[img_id]
scores_per_img, topk_indices = cls_score_per_img.flatten(
0, 1).topk(
self.test_cfg.max_per_img, sorted=False)
labels_per_img = topk_indices % num_classes
bbox_pred_per_img = proposal_list[img_id][topk_indices //
num_classes]
if rescale:
scale_factor = img_metas[img_id]['scale_factor']
bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)
det_bboxes.append(
torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))
det_labels.append(labels_per_img)
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i], num_classes)
for i in range(num_imgs)
]
if self.with_mask:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
segm_results = []
mask_pred = mask_results['mask_pred']
for img_id in range(num_imgs):
mask_pred_per_img = mask_pred[img_id].flatten(0,
1)[topk_indices]
mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat(
1, num_classes, 1, 1)
segm_result = self.mask_head[-1].get_seg_masks(
mask_pred_per_img, _bboxes[img_id], det_labels[img_id],
self.test_cfg, ori_shapes[img_id], scale_factors[img_id],
rescale)
segm_results.append(segm_result)
if self.with_mask:
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
def aug_test(self, features, proposal_list, img_metas, rescale=False):
raise NotImplementedError(
'Sparse R-CNN and QueryInst do not support `aug_test`')
def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):
"""Dummy forward function when do the flops computing."""
all_stage_bbox_results = []
proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
object_feats = proposal_features
if self.with_bbox:
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
img_metas)
all_stage_bbox_results.append((bbox_results, ))
proposal_list = bbox_results['detach_proposal_list']
object_feats = bbox_results['object_feats']
if self.with_mask:
rois = bbox2roi(proposal_list)
mask_results = self._mask_forward(
stage, x, rois, bbox_results['attn_feats'])
all_stage_bbox_results[-1] += (mask_results, )
return all_stage_bbox_results
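# Illustrative sketch, not part of the original module: in an mmdet config file
# this head is usually described as a plain dict and built through the HEADS
# registry. The values below simply echo a subset of the constructor defaults
# above; a real Sparse R-CNN / QueryInst config would also set train_cfg and
# test_cfg.
_EXAMPLE_SPARSE_ROI_HEAD_CFG = dict(
    type='SparseRoIHead',
    num_stages=6,
    stage_loss_weights=(1, 1, 1, 1, 1, 1),
    proposal_feature_channel=256,
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='DIIHead',
        num_classes=80,
        feedforward_channels=2048,
        hidden_channels=256,
        roi_feat_size=7))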
|
|
import boto3
import os
import time
from boto3.exceptions import Boto3Error
import logging
from .cloud_provider import CloudProviderBase
from .node import Node
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
AWS_REGION = 'us-east-2'
AWS_REGION_AZ = 'us-east-2a'
AWS_SECURITY_GROUP = os.environ.get("AWS_SECURITY_GROUPS",
'sg-0e753fd5550206e55')
AWS_SECURITY_GROUPS = [AWS_SECURITY_GROUP]
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_CICD_INSTANCE_TAG = os.environ.get(
"AWS_CICD_INSTANCE_TAG", 'rancher-validation')
AWS_INSTANCE_TYPE = os.environ.get("AWS_INSTANCE_TYPE", 't2.medium')
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
PRIVATE_IMAGES = {
"rancheros-v1.5.1-docker-native": {
'image': 'ami-00769ca587d8e100c', 'ssh_user': 'rancher'},
"rhel-7.6-docker-native-113": {
'image': 'ami-04b9aacf7e1512c0b', 'ssh_user': 'ec2-user'},
"suse-sles12-sp2-docker-18061ce": {
'image': 'ami-0cc154aeb82bd8fa0', 'ssh_user': 'ec2-user'},
"ubuntu-16.04-docker-18.09": {
'image': 'ami-07e968eb9151b2599', 'ssh_user': 'ubuntu'},
"ubuntu-18.04-docker-18.09": {
'image': 'ami-02dcbc347c866fb5d', 'ssh_user': 'ubuntu'},
"rhel-7.6-docker-18.09": {
'image': 'ami-094574ffb6efb3a9b', 'ssh_user': 'ec2-user'}}
PUBLIC_AMI = {
'us-east-2': {
"ubuntu-16.04": {
'image': 'ami-965e6bf3', 'ssh_user': 'ubuntu'},
"rhel-7.4": {
'image': 'ami-0b1e356e', 'ssh_user': 'ec2-user'},
"ros-1.4.0": {
'image': 'ami-504b7435', 'ssh_user': 'rancher'}},
'us-east-1': {
"ubuntu-16.04": {
'image': 'ami-cf6c47aa', 'ssh_user': 'ubuntu'},
"rhel-7.4": {
'image': 'ami-0b1e356e', 'ssh_user': 'ec2-user'}}
}
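# Illustrative sketch, not part of the original module: _select_private_ami()
# below looks images up by a "<os_version>-docker-<docker_version>" key, so the
# configured OS_VERSION/DOCKER_VERSION pair must match one of the PRIVATE_IMAGES
# entries above (e.g. "ubuntu-18.04" + "18.09" -> "ubuntu-18.04-docker-18.09").
def _example_private_image_key(os_version, docker_version):
    return "{}-docker-{}".format(os_version, docker_version)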
class AmazonWebServices(CloudProviderBase):
def __init__(self):
self._client = boto3.client(
'ec2',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION)
self.master_ssh_key = None
self.master_ssh_key_path = None
if AWS_SSH_KEY_NAME:
self.master_ssh_key = self.get_ssh_key(AWS_SSH_KEY_NAME)
self.master_ssh_key_path = self.get_ssh_key_path(AWS_SSH_KEY_NAME)
# Used for cleanup
self.created_node = []
self.created_keys = []
# Called when an AMI with Docker pre-installed should be used
# (i.e. DOCKER_INSTALLED is not 'false').
def _select_private_ami(self, os_version=None, docker_version=None):
os_version = os_version or self.OS_VERSION
docker_version = docker_version or self.DOCKER_VERSION
image = PRIVATE_IMAGES[
"{}-docker-{}".format(os_version, docker_version)]
return image['image'], image['ssh_user']
def _select_ami(self, os_version=None):
image = PUBLIC_AMI[AWS_REGION][os_version]
return image['image'], image['ssh_user']
def create_node(
self, node_name, key_name=None, os_version=None, docker_version=None,
wait_for_ready=True):
os_version = os_version or self.OS_VERSION
docker_version = docker_version or self.DOCKER_VERSION
if self.DOCKER_INSTALLED.lower() == 'false':
image, ssh_user = self._select_ami(os_version)
else:
image, ssh_user = self._select_private_ami(
os_version, docker_version)
if key_name:
# key_name already refers to a private key file (.pem)
if key_name.endswith('.pem'):
ssh_private_key_name = key_name
ssh_private_key = self.get_ssh_key(key_name)
ssh_private_key_path = self.get_ssh_key_path(key_name)
else:
# get private key
ssh_private_key_name = key_name.replace('.pub', '')
ssh_private_key = self.get_ssh_key(ssh_private_key_name)
ssh_private_key_path = self.get_ssh_key_path(
ssh_private_key_name)
else:
key_name = AWS_SSH_KEY_NAME.replace('.pem', '')
ssh_private_key_name = key_name
ssh_private_key = self.master_ssh_key
ssh_private_key_path = self.master_ssh_key_path
args = {"ImageId": image,
"InstanceType": AWS_INSTANCE_TYPE,
"MinCount": 1,
"MaxCount": 1,
"TagSpecifications": [{'ResourceType': 'instance', 'Tags': [
{'Key': 'Name', 'Value': node_name},
{'Key': 'CICD', 'Value': AWS_CICD_INSTANCE_TAG}]}],
"KeyName": key_name,
"NetworkInterfaces": [{
'DeviceIndex': 0,
'AssociatePublicIpAddress': True,
'Groups': AWS_SECURITY_GROUPS}],
"Placement": {'AvailabilityZone': AWS_REGION_AZ},
"BlockDeviceMappings":
[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}]
}
if (len(AWS_IAM_PROFILE) > 0):
args["IamInstanceProfile"] = {'Name': AWS_IAM_PROFILE}
instance = self._client.run_instances(**args)
node = Node(
provider_node_id=instance['Instances'][0]['InstanceId'],
state=instance['Instances'][0]['State']['Name'],
ssh_user=ssh_user,
ssh_key_name=ssh_private_key_name,
ssh_key_path=ssh_private_key_path,
ssh_key=ssh_private_key,
os_version=os_version,
docker_version=docker_version)
# mark for clean up at the end
self.created_node.append(node.provider_node_id)
if wait_for_ready:
node = self.wait_for_node_state(node)
node.ready_node()
return node
def create_multiple_nodes(
self, number_of_nodes, node_name_prefix, os_version=None,
docker_version=None, key_name=None, wait_for_ready=True):
nodes = []
for i in range(number_of_nodes):
node_name = "{}_{}".format(node_name_prefix, i)
nodes.append(self.create_node(
node_name, key_name=key_name, os_version=os_version,
docker_version=docker_version, wait_for_ready=False))
if wait_for_ready:
nodes = self.wait_for_nodes_state(nodes)
# Workaround for AMIs with Docker pre-installed: reboot the nodes and
# wait for them to come back up before marking them ready.
if self.DOCKER_INSTALLED.lower() == 'true':
time.sleep(5)
self.reboot_nodes(nodes)
time.sleep(10)
nodes = self.wait_for_nodes_state(nodes)
for node in nodes:
node.ready_node()
return nodes
def get_node(self, provider_id):
node_filter = [{
'Name': 'instance-id', 'Values': [provider_id]}]
try:
response = self._client.describe_instances(Filters=node_filter)
nodes = response.get('Reservations', [])
if len(nodes) == 0:
return None # no node found
aws_node = nodes[0]['Instances'][0]
node = Node(
provider_node_id=provider_id,
# node_name= aws_node tags?,
host_name=aws_node.get('PublicDnsName'),
public_ip_address=aws_node.get('PublicIpAddress'),
private_ip_address=aws_node.get('PrivateIpAddress'),
state=aws_node['State']['Name'])
return node
        except Boto3Error as e:
            msg = "Failed while querying instance '{}' state!: {}".format(
                provider_id, str(e))
            raise RuntimeError(msg)
def update_node(self, node):
node_filter = [{
'Name': 'instance-id', 'Values': [node.provider_node_id]}]
try:
response = self._client.describe_instances(Filters=node_filter)
nodes = response.get('Reservations', [])
if len(nodes) == 0 or len(nodes[0]['Instances']) == 0:
return node
aws_node = nodes[0]['Instances'][0]
node.state = aws_node['State']['Name']
node.host_name = aws_node.get('PublicDnsName')
node.public_ip_address = aws_node.get('PublicIpAddress')
node.private_ip_address = aws_node.get('PrivateIpAddress')
return node
        except Boto3Error as e:
            msg = "Failed while querying instance '{}' state!: {}".format(
                node.provider_node_id, str(e))
            raise RuntimeError(msg)
def start_node(self, node, wait_for_start=True):
self._client.start_instances(
InstanceIds=[node.provider_node_id])
if wait_for_start:
node = self.wait_for_node_state(node)
return node
def reboot_nodes(self, nodes):
instances = [node.provider_node_id for node in nodes]
self._client.reboot_instances(
InstanceIds=instances)
return
def stop_node(self, node, wait_for_stopped=False):
self._client.stop_instances(
InstanceIds=[node.provider_node_id])
if wait_for_stopped:
node = self.wait_for_node_state(node, 'stopped')
return node
def delete_node(self, node, wait_for_deleted=False):
self._client.terminate_instances(
InstanceIds=[node.provider_node_id])
if wait_for_deleted:
node = self.wait_for_node_state(node, 'terminated')
return node
def wait_for_node_state(self, node, state='running'):
# 'running', 'stopped', 'terminated'
timeout = 300
start_time = time.time()
while time.time() - start_time < timeout:
node = self.update_node(node)
if node.state == state:
return node
time.sleep(5)
def wait_for_nodes_state(self, nodes, state='running'):
# 'running', 'stopped', 'terminated'
timeout = 300
start_time = time.time()
completed_nodes = []
while time.time() - start_time < timeout:
for node in nodes:
if len(completed_nodes) == len(nodes):
time.sleep(20) # Give the node some extra time
return completed_nodes
if node in completed_nodes:
continue
node = self.update_node(node)
if node.state == state:
completed_nodes.append(node)
time.sleep(1)
time.sleep(4)
def import_ssh_key(self, ssh_key_name, public_ssh_key):
self._client.delete_key_pair(KeyName=ssh_key_name)
self._client.import_key_pair(
KeyName=ssh_key_name,
PublicKeyMaterial=public_ssh_key)
# mark keys for cleanup
self.created_keys.append(ssh_key_name)
def delete_ssh_key(self, ssh_key_name):
self._client.delete_key_pair(KeyName=ssh_key_name)
def get_nodes(self, filters):
try:
response = self._client.describe_instances(Filters=filters)
nodes = response.get('Reservations', [])
if len(nodes) == 0:
return None # no node found
ret_nodes = []
for aws_node_i in nodes:
aws_node = aws_node_i['Instances'][0]
node = Node(
provider_node_id=aws_node.get('InstanceId'),
# node_name= aws_node tags?,
host_name=aws_node.get('PublicDnsName'),
public_ip_address=aws_node.get('PublicIpAddress'),
private_ip_address=aws_node.get('PrivateIpAddress'),
state=aws_node['State']['Name'])
ret_nodes.append(node)
return ret_nodes
except Boto3Error as e:
msg = "Failed while getting instances: {}".format(str(e))
raise RuntimeError(msg)
def delete_nodes(self, nodes, wait_for_deleted=False):
instance_ids = [node.provider_node_id for node in nodes]
self._client.terminate_instances(InstanceIds=instance_ids)
if wait_for_deleted:
for node in nodes:
node = self.wait_for_node_state(node, 'terminated')
    def delete_keypairs(self, name_prefix):
        if len(name_prefix) > 0:
            key_pairs = self._client.describe_key_pairs()
            print(key_pairs["KeyPairs"])
            key_pair_list = key_pairs["KeyPairs"]
            print(len(key_pair_list))
            for key in key_pair_list:
                key_name = key["KeyName"]
                if key_name.startswith(name_prefix):
                    print(key_name)
                    self._client.delete_key_pair(KeyName=key_name)
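# Illustrative usage sketch only (never called): it assumes an already constructed
# provider instance of the class defined above; the prefix value is hypothetical.
# It provisions two nodes, prints their state, then cleans everything up.
def _example_provision_and_cleanup(provider, prefix="ci-demo"):
    nodes = provider.create_multiple_nodes(2, prefix + "-node")
    try:
        for node in nodes:
            print(node.provider_node_id, node.state)
    finally:
        # terminate the instances and remove any imported key pairs with this prefix
        provider.delete_nodes(nodes, wait_for_deleted=True)
        provider.delete_keypairs(prefix)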
|
|
#!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network
short_description: Manage OneView Ethernet Network resources
description:
- Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- hpOneView >= 3.1.0
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
state:
description:
- Indicates the desired state for the Ethernet Network resource.
- C(present) will ensure data properties are compliant with OneView.
- C(absent) will remove the resource from OneView, if it exists.
- C(default_bandwidth_reset) will reset the network connection template to the default.
default: present
choices: [present, absent, default_bandwidth_reset]
data:
description:
- List with Ethernet Network properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Ensure that the Ethernet Network is present using the default configuration
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
vlanId: '201'
delegate_to: localhost
- name: Update the Ethernet Network changing bandwidth and purpose
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
purpose: Management
bandwidth:
maximumBandwidth: 3000
typicalBandwidth: 2000
delegate_to: localhost
- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: 'Test Ethernet Network'
newName: 'Renamed Ethernet Network'
delegate_to: localhost
- name: Ensure that the Ethernet Network is absent
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: absent
data:
name: 'New Ethernet Network'
delegate_to: localhost
- name: Create Ethernet networks in bulk
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
vlanIdRange: '1-10,15,17'
purpose: General
namePrefix: TestNetwork
smartLink: false
privateNetwork: false
bandwidth:
maximumBandwidth: 10000
typicalBandwidth: 2000
delegate_to: localhost
- name: Reset to the default network connection template
oneview_ethernet_network:
config: '/etc/oneview/oneview_config.json'
state: default_bandwidth_reset
data:
name: 'Test Ethernet Network'
delegate_to: localhost
'''
RETURN = '''
ethernet_network:
description: Has the facts about the Ethernet Networks.
returned: On state 'present'. Can be null.
type: dict
ethernet_network_bulk:
description: Has the facts about the Ethernet Networks affected by the bulk insert.
returned: When 'vlanIdRange' attribute is in data argument. Can be null.
type: dict
ethernet_network_connection_template:
description: Has the facts about the Ethernet Network Connection Template.
returned: On state 'default_bandwidth_reset'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
class EthernetNetworkModule(OneViewModuleBase):
MSG_CREATED = 'Ethernet Network created successfully.'
MSG_UPDATED = 'Ethernet Network updated successfully.'
MSG_DELETED = 'Ethernet Network deleted successfully.'
MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
RESOURCE_FACT_NAME = 'ethernet_network'
def __init__(self):
argument_spec = dict(
state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
data=dict(type='dict', required=True),
)
super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
self.resource_client = self.oneview_client.ethernet_networks
def execute_module(self):
changed, msg, ansible_facts, resource = False, '', {}, None
if self.data.get('name'):
resource = self.get_by_name(self.data['name'])
if self.state == 'present':
if self.data.get('vlanIdRange'):
return self._bulk_present()
else:
return self._present(resource)
elif self.state == 'absent':
return self.resource_absent(resource)
elif self.state == 'default_bandwidth_reset':
changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
def _present(self, resource):
bandwidth = self.data.pop('bandwidth', None)
scope_uris = self.data.pop('scopeUris', None)
result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
if bandwidth:
if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
result['changed'] = True
result['msg'] = self.MSG_UPDATED
if scope_uris is not None:
result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
return result
def _bulk_present(self):
vlan_id_range = self.data['vlanIdRange']
result = dict(ansible_facts={})
ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
if not ethernet_networks:
self.resource_client.create_bulk(self.data)
result['changed'] = True
result['msg'] = self.MSG_BULK_CREATED
else:
vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
for net in ethernet_networks[:]:
vlan_ids.remove(net['vlanId'])
if len(vlan_ids) == 0:
result['msg'] = self.MSG_BULK_ALREADY_EXIST
result['changed'] = False
else:
if len(vlan_ids) == 1:
self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
else:
self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
self.resource_client.create_bulk(self.data)
result['changed'] = True
result['msg'] = self.MSG_MISSING_BULK_CREATED
result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
return result
def _update_connection_template(self, ethernet_network, bandwidth):
if 'connectionTemplateUri' not in ethernet_network:
return False, None
connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
merged_data = connection_template.copy()
merged_data.update({'bandwidth': bandwidth})
if not self.compare(connection_template, merged_data):
connection_template = self.oneview_client.connection_templates.update(merged_data)
return True, connection_template
else:
return False, None
def _default_bandwidth_reset(self, resource):
if not resource:
raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
default_connection_template = self.oneview_client.connection_templates.get_default()
changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
ethernet_network_connection_template=connection_template)
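# Illustrative helper only, never called by the module: it mirrors the reconciliation
# done in _bulk_present() after the requested VLAN range has been expanded to
# individual IDs, so the "create only the missing networks" behaviour is easy to
# follow. The argument names are hypothetical.
def _example_missing_vlan_ids(requested_vlan_ids, existing_networks):
    remaining = list(requested_vlan_ids)
    for net in existing_networks:
        if net['vlanId'] in remaining:
            remaining.remove(net['vlanId'])
    return remaining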
def main():
EthernetNetworkModule().run()
if __name__ == '__main__':
main()
|
|
import pygame as pg
import heapq
from os import path
from random import choice
vec = pg.math.Vector2
TILESIZE = 48
GRIDWIDTH = 30
GRIDHEIGHT = 15
WIDTH = TILESIZE * GRIDWIDTH
HEIGHT = TILESIZE * GRIDHEIGHT
FPS = 60
MOB_SPEED = 5
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
FOREST = (34, 57, 10)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
YELLOW = (255, 255, 0)
DARKGRAY = (40, 40, 40)
LIGHTGRAY = (140, 140, 140)
font_name = pg.font.match_font('hack')
def draw_text(text, size, color, x, y, align="topleft"):
font = pg.font.Font(font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect(**{align: (x, y)})
screen.blit(text_surface, text_rect)
class Mob(pg.sprite.Sprite):
def __init__(self):
self.groups = all_sprites
pg.sprite.Sprite.__init__(self, self.groups)
self.image = pg.Surface((10, 10))
self.image.fill(YELLOW)
self.rect = self.image.get_rect()
spawn = vec(choice(list(paths.keys())))
# pixel position
self.pos = vec(spawn.x * TILESIZE + TILESIZE / 2, spawn.y * TILESIZE + TILESIZE / 2)
self.rect.center = self.pos
self.on_path = False
@property
def grid_pos(self):
# find grid pos
return (int(self.pos.x // TILESIZE), int(self.pos.y // TILESIZE))
def update(self):
if self.grid_pos == goal:
self.kill()
return
# if in a node tile, steer to next node
class SquareGrid:
def __init__(self, width, height):
self.width = width
self.height = height
self.walls = []
self.connections = [vec(1, 0), vec(-1, 0), vec(0, 1), vec(0, -1)]
# comment/uncomment this for diagonals:
# self.connections += [vec(1, 1), vec(-1, 1), vec(1, -1), vec(-1, -1)]
def in_bounds(self, node):
return 0 <= node.x < self.width and 0 <= node.y < self.height
def passable(self, node):
return node not in self.walls
def find_neighbors(self, node):
neighbors = [node + connection for connection in self.connections]
neighbors = filter(self.in_bounds, neighbors)
neighbors = filter(self.passable, neighbors)
return neighbors
def draw(self):
for wall in self.walls:
rect = pg.Rect(wall * TILESIZE, (TILESIZE, TILESIZE))
pg.draw.rect(screen, LIGHTGRAY, rect)
class WeightedMesh:
# edges = {(1, 1): {(4, 11): 10, (9, 7): 25}}
def __init__(self):
        # connections and costs from node to node
self.edges = {(0, 8): {(2, 8): 20, (0, 1): 70, (0, 13): 50},
(0, 1): {(0, 8): 70, (4, 0): 44},
(2, 8): {(0, 8): 20},
(4, 0): {(0, 1): 44, (8, 2): 48},
(8, 2): {(4, 0): 48, (11, 1): 34, (11, 6): 52},
(0, 13): {(0, 8): 50, (6, 13): 60},
(6, 13): {(0, 13): 60, (11, 10): 72},
(11, 10): {(6, 13): 72, (13, 10): 20},
(13, 10): {(11, 10): 20, (15, 7): 38, (17, 13): 52},
(11, 1): {(8, 2): 34, (15, 3): 48},
(11, 6): {(8, 2): 52, (15, 7): 44},
(15, 7): {(15, 3): 40, (11, 6): 44, (13, 10): 38, (20, 7): 50},
(15, 3): {(11, 1): 48, (15, 7): 40, (17, 2): 24},
(17, 2): {(15, 3): 24, (20, 2): 30},
(20, 2): {(17, 2): 30, (20, 7): 50, (23, 1): 34},
(23, 1): {(20, 2): 34, (27, 1): 40},
(27, 1): {(23, 1): 40, (26, 4): 34},
(26, 4): {(27, 1): 34, (25, 7): 34},
(25, 7): {(26, 4): 34, (20, 7): 50, (25, 10): 30},
(20, 7): {(15, 7): 50, (20, 2): 50, (25, 7): 50},
(25, 10): {(25, 7): 30, (22, 12): 38},
(22, 12): {(25, 10): 38, (17, 13): 54},
(17, 13): {(13, 10): 52, (22, 12): 54}}
def find_neighbors(self, node):
return list(self.edges[node].keys())
def find_nearest(self, tile):
return min(self.edges.keys(), key=lambda n: (abs(n[0] - tile[0]) + abs(n[1] - tile[1])))
def cost(self, from_node, to_node):
return self.edges[from_node][to_node]
def draw(self):
for node in self.edges.keys():
x = int(node[0] * TILESIZE + TILESIZE / 2)
y = int(node[1] * TILESIZE + TILESIZE / 2)
pg.draw.circle(screen, CYAN, (x, y), 10)
for c in self.edges[node]:
cx = c[0] * TILESIZE + TILESIZE / 2
cy = c[1] * TILESIZE + TILESIZE / 2
pg.draw.line(screen, CYAN, (x, y), (cx, cy), 10)
class PriorityQueue:
def __init__(self):
self.nodes = []
def put(self, node, cost):
heapq.heappush(self.nodes, (cost, node))
def get(self):
return heapq.heappop(self.nodes)[1]
def empty(self):
return len(self.nodes) == 0
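# Tiny illustration (not part of the demo): items come back lowest-cost first, which is
# exactly what dijkstra_search() below relies on.
def _example_priority_queue():
    pq = PriorityQueue()
    pq.put((3, 4), 12)
    pq.put((1, 1), 5)
    pq.put((7, 2), 9)
    order = []
    while not pq.empty():
        order.append(pq.get())
    return order  # [(1, 1), (7, 2), (3, 4)]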
def draw_grid():
for x in range(0, WIDTH, TILESIZE):
pg.draw.line(screen, LIGHTGRAY, (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, TILESIZE):
pg.draw.line(screen, LIGHTGRAY, (0, y), (WIDTH, y))
def draw_icons():
goal_center = (goal[0] * TILESIZE + TILESIZE / 2, goal[1] * TILESIZE + TILESIZE / 2)
screen.blit(home_img, home_img.get_rect(center=goal_center))
start_center = (start[0] * TILESIZE + TILESIZE / 2, start[1] * TILESIZE + TILESIZE / 2)
screen.blit(cross_img, cross_img.get_rect(center=start_center))
def vec2int(v):
return (int(v.x), int(v.y))
def t2px(tile):
x, y = tile
x = x * TILESIZE + TILESIZE / 2
y = y * TILESIZE + TILESIZE / 2
return (x, y)
def dijkstra_search(graph, start):
frontier = PriorityQueue()
frontier.put(start, 0)
path = {}
cost = {}
path[start] = None
cost[start] = 0
while not frontier.empty():
current = frontier.get()
        for next_node in graph.find_neighbors(current):
            next_cost = cost[current] + graph.cost(current, next_node)
            if next_node not in cost or next_cost < cost[next_node]:
                cost[next_node] = next_cost
                priority = next_cost
                frontier.put(next_node, priority)
                path[next_node] = current
return path
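# Helper for illustration only (the main loop below walks the dict inline): the mapping
# returned by dijkstra_search() points every node at the neighbour it was reached from,
# so following it repeatedly walks back to the node the search started from (the goal
# in this demo).
def _example_follow_path(paths, from_node, to_node):
    route = [from_node]
    current = from_node
    while current != to_node:
        current = paths[current]
        route.append(current)
    return route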
pg.init()
screen = pg.display.set_mode((WIDTH, HEIGHT))
clock = pg.time.Clock()
# icons
icon_dir = path.join(path.dirname(__file__), '../icons')
home_img = pg.image.load(path.join(icon_dir, 'home.png')).convert_alpha()
home_img = pg.transform.scale(home_img, (50, 50))
home_img.fill((0, 255, 0, 255), special_flags=pg.BLEND_RGBA_MULT)
cross_img = pg.image.load(path.join(icon_dir, 'cross.png')).convert_alpha()
cross_img = pg.transform.scale(cross_img, (50, 50))
cross_img.fill((255, 0, 0, 255), special_flags=pg.BLEND_RGBA_MULT)
all_sprites = pg.sprite.Group()
g = SquareGrid(30, 15)
m = WeightedMesh()
# add walls
walls = [(3, 2), (2, 2), (4, 1), (4, 2), (5, 2), (6, 2), (6, 3), (6, 4), (6, 5), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10), (9, 9), (9, 8), (9, 6), (9, 5), (7, 5), (8, 5), (6, 11), (6, 12), (6, 14), (10, 7), (9, 7), (11, 7), (12, 7), (12, 9), (12, 8), (12, 11), (12, 12), (12, 13), (12, 14), (11, 0), (11, 2), (11, 3), (11, 4), (12, 4), (13, 4), (13, 5), (14, 5), (16, 5), (17, 5), (18, 5), (19, 5), (17, 4), (17, 3), (17, 0), (17, 1), (21, 5), (22, 5), (23, 5), (23, 4), (23, 3), (23, 2), (23, 0), (24, 4), (25, 4), (27, 4), (17, 6), (17, 8), (17, 9), (17, 10), (17, 11), (17, 12), (17, 14), (18, 9), (20, 9), (19, 9), (21, 9), (22, 9), (23, 9), (24, 9), (26, 9), (27, 9), (22, 10), (22, 11), (22, 13), (22, 14), (1, 7), (1, 6), (1, 5), (1, 4), (1, 3), (1, 2), (1, 9), (1, 10)]
for wall in walls:
g.walls.append(vec(wall))
goal = (0, 1)
start = (17, 13)
paths = dijkstra_search(m, m.find_nearest(goal))
print(paths)
pg.time.set_timer(pg.USEREVENT, 50)
paused = False
running = True
while running:
clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
if event.key == pg.K_SPACE:
paused = not paused
# if event.type == pg.USEREVENT and not paused:
# Mob()
if event.type == pg.MOUSEBUTTONDOWN:
mpos = pg.mouse.get_pos()
x = mpos[0] // TILESIZE
y = mpos[1] // TILESIZE
if event.button == 1:
# move start
start = m.find_nearest((x, y))
if event.button == 3:
# move goal
goal = m.find_nearest((x, y))
paths = dijkstra_search(m, m.find_nearest(goal))
if not paused:
all_sprites.update()
pg.display.set_caption("{:.2f}".format(clock.get_fps()))
screen.fill(DARKGRAY)
draw_grid()
g.draw()
m.draw()
# draw path from start to goal
current = start
l = 0
while current != goal:
next_node = paths[current]
current_tile = t2px(current)
next_tile = t2px(next_node)
pg.draw.line(screen, RED, current_tile, next_tile, 8)
# l += m.cost(current, next_node)
current = paths[current]
# for n in disp_nodes:
# screen_x = n[0] * TILESIZE + TILESIZE / 2
# screen_y = n[1] * TILESIZE + TILESIZE / 2
# draw_text(str(l), 12, WHITE, 10, 10, align="topleft")
draw_icons()
pg.display.flip()
|
|
from app.models import (
NOTIFICATION_STATUS_LETTER_ACCEPTED,
NOTIFICATION_STATUS_LETTER_RECEIVED,
NOTIFICATION_STATUS_TYPES,
NOTIFICATION_TYPES,
)
from app.schema_validation.definitions import personalisation, uuid
template = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "template schema",
"type": "object",
"title": "notification content",
"properties": {
"id": uuid,
"version": {"type": "integer"},
"uri": {"type": "string", "format": "uri"}
},
"required": ["id", "version", "uri"]
}
notification_by_id = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "GET notification response schema",
"type": "object",
"title": "response v2/notification",
"properties": {
"notification_id": uuid
},
"required": ["notification_id"]
}
get_notification_response = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "GET notification response schema",
"type": "object",
"title": "response v2/notification",
"properties": {
"id": uuid,
"reference": {"type": ["string", "null"]},
"email_address": {"type": ["string", "null"]},
"phone_number": {"type": ["string", "null"]},
"line_1": {"type": ["string", "null"]},
"line_2": {"type": ["string", "null"]},
"line_3": {"type": ["string", "null"]},
"line_4": {"type": ["string", "null"]},
"line_5": {"type": ["string", "null"]},
"line_6": {"type": ["string", "null"]},
"postcode": {"type": ["string", "null"]},
"type": {"enum": ["sms", "letter", "email"]},
"status": {"type": "string"},
"template": template,
"body": {"type": "string"},
"subject": {"type": ["string", "null"]},
"created_at": {"type": "string"},
"sent_at": {"type": ["string", "null"]},
"completed_at": {"type": ["string", "null"]},
"scheduled_for": {"type": ["string", "null"]}
},
"required": [
# technically, all keys are required since we always have all of them
"id", "reference", "email_address", "phone_number",
"line_1", "line_2", "line_3", "line_4", "line_5", "line_6", "postcode",
"type", "status", "template", "body", "created_at", "sent_at", "completed_at"
]
}
get_notifications_request = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "schema for query parameters allowed when getting list of notifications",
"type": "object",
"properties": {
"reference": {"type": "string"},
"status": {
"type": "array",
"items": {
"enum": NOTIFICATION_STATUS_TYPES +
[NOTIFICATION_STATUS_LETTER_ACCEPTED + ', ' + NOTIFICATION_STATUS_LETTER_RECEIVED]
}
},
"template_type": {
"type": "array",
"items": {
"enum": NOTIFICATION_TYPES
}
},
"include_jobs": {"enum": ["true", "True"]},
"older_than": uuid
},
"additionalProperties": False,
}
get_notifications_response = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "GET list of notifications response schema",
"type": "object",
"properties": {
"notifications": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/notification"
}
},
"links": {
"type": "object",
"properties": {
"current": {
"type": "string"
},
"next": {
"type": "string"
}
},
"additionalProperties": False,
"required": ["current"]
}
},
"additionalProperties": False,
"required": ["notifications", "links"],
"definitions": {
"notification": get_notification_response
},
}
post_sms_request = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST sms notification schema",
"type": "object",
"title": "POST v2/notifications/sms",
"properties": {
"reference": {"type": "string"},
"phone_number": {"type": "string", "format": "phone_number"},
"template_id": uuid,
"personalisation": personalisation,
"scheduled_for": {"type": ["string", "null"], "format": "datetime_within_next_day"},
"sms_sender_id": uuid
},
"required": ["phone_number", "template_id"],
"additionalProperties": False
}
sms_content = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "content schema for SMS notification response schema",
"type": "object",
"title": "notification content",
"properties": {
"body": {"type": "string"},
"from_number": {"type": "string"}
},
"required": ["body", "from_number"]
}
post_sms_response = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST sms notification response schema",
"type": "object",
"title": "response v2/notifications/sms",
"properties": {
"id": uuid,
"reference": {"type": ["string", "null"]},
"content": sms_content,
"uri": {"type": "string", "format": "uri"},
"template": template,
"scheduled_for": {"type": ["string", "null"]}
},
"required": ["id", "content", "uri", "template"]
}
post_email_request = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST email notification schema",
"type": "object",
"title": "POST v2/notifications/email",
"properties": {
"reference": {"type": "string"},
"email_address": {"type": "string", "format": "email_address"},
"template_id": uuid,
"personalisation": personalisation,
"scheduled_for": {"type": ["string", "null"], "format": "datetime_within_next_day"},
"email_reply_to_id": uuid
},
"required": ["email_address", "template_id"],
"additionalProperties": False
}
email_content = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Email content for POST email notification",
"type": "object",
"title": "notification email content",
"properties": {
"from_email": {"type": "string", "format": "email_address"},
"body": {"type": "string"},
"subject": {"type": "string"}
},
"required": ["body", "from_email", "subject"]
}
post_email_response = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST email notification response schema",
"type": "object",
"title": "response v2/notifications/email",
"properties": {
"id": uuid,
"reference": {"type": ["string", "null"]},
"content": email_content,
"uri": {"type": "string", "format": "uri"},
"template": template,
"scheduled_for": {"type": ["string", "null"]}
},
"required": ["id", "content", "uri", "template"]
}
post_letter_request = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST letter notification schema",
"type": "object",
"title": "POST v2/notifications/letter",
"properties": {
"reference": {"type": "string"},
"template_id": uuid,
"personalisation": personalisation
},
"required": ["template_id", "personalisation"],
"additionalProperties": False
}
post_precompiled_letter_request = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST precompiled letter notification schema",
"type": "object",
"title": "POST v2/notifications/letter",
"properties": {
"reference": {"type": "string"},
"content": {"type": "string"},
"postage": {"type": "string", "format": "postage"}
},
"required": ["reference", "content"],
"additionalProperties": False
}
letter_content = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Letter content for POST letter notification",
"type": "object",
"title": "notification letter content",
"properties": {
"body": {"type": "string"},
"subject": {"type": "string"}
},
"required": ["body", "subject"]
}
post_letter_response = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST sms notification response schema",
"type": "object",
"title": "response v2/notifications/letter",
"properties": {
"id": uuid,
"reference": {"type": ["string", "null"]},
"content": letter_content,
"uri": {"type": "string", "format": "uri"},
"template": template,
# letters cannot be scheduled
"scheduled_for": {"type": "null"}
},
"required": ["id", "content", "uri", "template"]
}
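# Example only: these schemas are plain draft-04 dicts, so they can be exercised
# directly with the third-party jsonschema package (assumed to be installed for this
# sketch; the application has its own validator). The payload values below are
# hypothetical.
def _example_validate_sms_payload():
    from jsonschema import Draft4Validator

    payload = {
        "phone_number": "+447700900123",
        "template_id": "5b4ee4b6-7f0d-4d8a-9c3b-0d3f6f0c8a11",
    }
    # raises jsonschema.ValidationError if the payload does not match post_sms_request
    Draft4Validator(post_sms_request).validate(payload)
    return payload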
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-iap documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-iap"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-iap",
"github_user": "googleapis",
"github_repo": "python-iap",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-iap-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-iap.tex",
"google-cloud-iap Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(root_doc, "google-cloud-iap", "google-cloud-iap Documentation", [author], 1,)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-iap",
"google-cloud-iap Documentation",
author,
"google-cloud-iap",
"google-cloud-iap Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an Ali Virtual Machine object.
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import json
import threading
import logging
from perfkitbenchmarker import flags
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import windows_virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import disk
from perfkitbenchmarker.alicloud import ali_disk
from perfkitbenchmarker.alicloud import util
FLAGS = flags.FLAGS
NON_HVM_PREFIXES = ['t1', 's1', 's2', 's3', 'm1']
flags.DEFINE_string('ali_user_name', 'ubuntu',
'This determines the user name that Perfkit will '
'attempt to use. This must be changed in order to '
'use any image other than ubuntu.')
flags.DEFINE_integer('ali_bandwidth_in', 100, 'Inbound Bandwidth')
flags.DEFINE_integer('ali_bandwidth_out', 100, 'Outbound Bandwidth')
flags.DEFINE_string('io_optimized', 'none',
                    'IO optimized for disk in AliCloud. The default is '
                    '"none", which means no IO optimization; '
                    '"optimized" means use IO optimization.')
DRIVE_START_LETTER = 'b'
DEFAULT_DISK_SIZE = 500
INSTANCE = 'instance'
IMAGE = 'image'
SNAPSHOT = 'snapshot'
DISK = 'disk'
NONE = 'none'
IO_OPTIMIZED = 'io_optimized'
RESOURCE_TYPE = {
INSTANCE: 'instance',
IMAGE: 'image',
SNAPSHOT: 'snapshot',
DISK: 'disk',
}
IO_STRATAGE = {
NONE: 'none',
IO_OPTIMIZED: 'optimized',
}
NUM_LOCAL_VOLUMES = {
'ecs.t1.small': 4,
'ecs.s1.small': 4,
'ecs.s1.medium': 4,
'ecs.s2.small': 4,
'ecs.s2.large': 4,
'ecs.s2.xlarge': 4,
'ecs.s3.medium': 4,
'ecs.s3.large': 4,
'ecs.m1.medium': 4,
}
INSTANCE_EXISTS_STATUSES = frozenset(
['Starting', 'Running', 'Stopping', 'Stopped'])
INSTANCE_DELETED_STATUSES = frozenset([])
INSTANCE_KNOWN_STATUSES = INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
DEFAULT_IMAGE = "ubuntu1404_64_20G_aliaegis_20150325.vhd",
class AliVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an AliCloud Virtual Machine."""
DEFAULT_ZONE = 'cn-hangzhou-d'
DEFAULT_MACHINE_TYPE = 'ecs.s3.large'
IMAGE_NAME_FILTER = 'ubuntu1404_64_20G_aliaegis*'
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
def __init__(self, vm_spec, network, firewall):
"""Initialize a AliCloud virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the VM.
network: network.BaseNetwork object corresponding to the VM.
firewall: network.BaseFirewall object corresponding to the VM.
"""
super(AliVirtualMachine, self).__init__(vm_spec, network, firewall)
self.image = self.image or DEFAULT_IMAGE
self.user_name = FLAGS.ali_user_name
self.region = util.GetRegionByZone(self.zone)
self.bandwidth_in = FLAGS.ali_bandwidth_in
self.bandwidth_out = FLAGS.ali_bandwidth_out
self.scratch_disk_size = FLAGS.scratch_disk_size or DEFAULT_DISK_SIZE
@classmethod
def _GetDefaultImage(cls, region):
"""Returns the default image given the machine type and region.
If no default is configured, this will return None.
"""
if cls.IMAGE_NAME_FILTER is None:
return None
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeImage',
'--RegionId %s' % region,
'--ImageName \'%s\'' % cls.IMAGE_NAME_FILTER]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = util.IssueRetryableCommand(describe_cmd)
if not stdout:
return None
images = json.loads(stdout)['Images']['Image']
# We want to return the latest version of the image, and since the wildcard
# portion of the image name is the image's creation date, we can just take
# the image with the 'largest' name.
return max(images, key=lambda image: image['ImageName'])['ImageId']
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data and tag it."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % self.id]
logging.info('Getting instance %s public IP. This will fail until '
'a public IP is available, but will be retried.', self.id)
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instance = response['Instances']['Instance'][0]
assert instance['PublicIpAddress']['IpAddress'][0] == self.ip_address
self.internal_ip = instance['InnerIpAddress']['IpAddress'][0]
self.group_id = instance['SecurityGroupIds']['SecurityGroupId'][0]
key_file = vm_util.GetPublicKeyPath()
util.AddPubKeyToHost(self.ip_address,
self.password,
key_file,
self.user_name)
util.AddDefaultTags(self.id, RESOURCE_TYPE[INSTANCE], self.region)
def _CreateDependencies(self):
"""Create VM dependencies."""
pass
def _DeleteDependencies(self):
"""Delete VM dependencies."""
pass
def _Create(self):
"""Create a VM instance."""
if self.image is None:
            # This is here and not in the __init__ method because _GetDefaultImage
# does a nontrivial amount of work (it calls the aliyuncli).
self.image = self._GetDefaultImage(self.region)
self.password = util.GeneratePassword()
create_cmd = util.ALI_PREFIX + [
'ecs',
'CreateInstance',
'--InstanceName perfkit-%s' % FLAGS.run_uri,
'--RegionId %s' % self.region,
'--ZoneId %s' % self.zone,
'--ImageId %s' % self.image,
'--InstanceType %s' % self.machine_type,
'--InternetChargeType PayByTraffic',
'--InternetMaxBandwidthIn %s' % self.bandwidth_in,
'--InternetMaxBandwidthOut %s' % self.bandwidth_out,
'--SecurityGroupId %s' % self.network.security_group.group_id,
'--Password %s' % self.password]
if FLAGS.scratch_disk_type == disk.LOCAL:
disk_cmd = [
'--SystemDiskCategory ephemeral_ssd',
'--DataDisk1Category ephemeral_ssd',
'--DataDisk1Size %s' % self.scratch_disk_size,
'--DataDisk1Device /dev/xvd%s' % DRIVE_START_LETTER]
create_cmd += disk_cmd
if FLAGS.io_optimized == IO_STRATAGE[IO_OPTIMIZED]:
io_opt_cmd = ['--IoOptimized optimized']
create_cmd += io_opt_cmd
create_cmd = util.GetEncodedCmd(create_cmd)
stdout, _ = vm_util.IssueRetryableCommand(create_cmd)
response = json.loads(stdout)
self.id = response['InstanceId']
allocateip_cmd = util.ALI_PREFIX + [
'ecs',
'AllocatePublicIpAddress',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
allocateip_cmd = util.GetEncodedCmd(allocateip_cmd)
stdout, _ = vm_util.IssueRetryableCommand(allocateip_cmd)
response = json.loads(stdout)
self.ip_address = response['IpAddress']
start_cmd = util.ALI_PREFIX + [
'ecs',
'StartInstance',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
start_cmd = util.GetEncodedCmd(start_cmd)
vm_util.IssueRetryableCommand(start_cmd)
def _Delete(self):
"""Delete a VM instance."""
stop_cmd = util.ALI_PREFIX + [
'ecs',
'StopInstance',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
stop_cmd = util.GetEncodedCmd(stop_cmd)
vm_util.IssueRetryableCommand(stop_cmd)
delete_cmd = util.ALI_PREFIX + [
'ecs',
'DeleteInstance',
'--RegionId %s' % self.region,
'--InstanceId %s' % self.id]
delete_cmd = util.GetEncodedCmd(delete_cmd)
vm_util.IssueRetryableCommand(delete_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
describe_cmd = util.ALI_PREFIX + [
'ecs',
'DescribeInstances',
'--RegionId %s' % self.region,
'--InstanceIds \'["%s"]\'' % str(self.id)]
describe_cmd = util.GetEncodedCmd(describe_cmd)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instances = response['Instances']['Instance']
assert len(instances) < 2, 'Too many instances.'
if not instances:
return False
assert len(instances) == 1, 'Wrong number of instances.'
status = instances[0]['Status']
assert status in INSTANCE_KNOWN_STATUSES, status
return status in INSTANCE_EXISTS_STATUSES
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
data_disk = ali_disk.AliDisk(disk_spec, self.zone)
self.scratch_disks.append(data_disk)
if disk_spec.disk_type != disk.LOCAL:
data_disk.Create()
data_disk.Attach(self)
else:
data_disk.device_letter = DRIVE_START_LETTER
self.FormatDisk(data_disk.GetDevicePath())
self.MountDisk(data_disk.GetDevicePath(), disk_spec.mount_point)
def GetLocalDisks(self):
"""Returns a list of local disks on the VM.
Returns:
A list of strings, where each string is the absolute path to the local
disks on the VM (e.g. '/dev/xvdb').
"""
return ['/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)
for i in xrange(NUM_LOCAL_VOLUMES[self.machine_type])]
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM."""
util.AddTags(self.id, RESOURCE_TYPE[INSTANCE], self.region, **kwargs)
class DebianBasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.DebianMixin):
IMAGE_NAME_FILTER = 'ubuntu1404_64*aliaegis*.vhd'
class RhelBasedAliVirtualMachine(AliVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
class WindowsAliVirtualMachine(AliVirtualMachine,
windows_virtual_machine.WindowsMixin):
pass
|
|
#! /usr/bin/env python
# coding=utf-8
import struct
import bitarray
import io
class Page:
def __init__(self, buffer, number):
self._buffer = buffer
self.clean = True
self.busy = True
self._type = 0
self.number = number
self.temp = False
self.pointer_fmt = struct.Struct('i')
    @property
    def next_page(self):
        return self.pointer_fmt.unpack_from(self._buffer, 0)[0]
    @next_page.setter
    def next_page(self, value):
        # pack directly into the page buffer; packing into a slice would only
        # modify a temporary copy of the bytearray
        self.pointer_fmt.pack_into(self._buffer, 0, value)
    @property
    def prev_page(self):
        return self.pointer_fmt.unpack_from(self._buffer, 4)[0]
    @prev_page.setter
    def prev_page(self, value):
        self.pointer_fmt.pack_into(self._buffer, 4, value)
def get_buffer(self):
return self._buffer
class ListPage(Page):
def __init__(self, page):
raise DeprecationWarning
self._fmt = 'i'
self.page = page
self.struct_size = struct.calcsize(self._fmt)
count = Memman.page_size // self.struct_size - 1
self.data_offset = count//8+1
self.bitmask = bitarray.bitarray(self.page._buffer[0:self.data_offset])
self.free_pos = 0
pass
class DataPage():
def __init__(self, page, fmt):
self.fmt = struct.Struct(fmt)
self.page = page
self.reset()
def reset(self):
count = Memman.page_size // self.fmt.size - 1
self.data_offset = (count//8) + 1 + 8 # 8 = page_next + page_prev
        self.bitmask = bitarray.bitarray()
self.bitmask.frombytes(bytes(self.page._buffer[8:self.data_offset]))
self.free_pos = 0
self.cur_non_free = -1
self.iterator_pos = -1
for pos, bit in enumerate(self.bitmask):
if bit:
self.cur_non_free = pos
def next_free(self):
self.free_pos += 1
try:
while self.bitmask[self.free_pos]:
self.free_pos += 1
except IndexError:
raise MemoryError #todo change exception
return self.free_pos
def __iter__(self):
for pos, bit in enumerate(self.bitmask):
if bit:
self.iterator_pos = pos
break
return self
def __next__(self):
if self.iterator_pos == -1:
raise StopIteration
old_pos = self.iterator_pos
self.iterator_pos = -1
for pos, bit in enumerate(self.bitmask[old_pos+1:], start=old_pos+1):
if bit:
self.iterator_pos = pos
break
return self.read(old_pos)
#todo check for cur_non_free correct values
def next(self):
if self.cur_non_free == -1:
raise StopIteration
old_non_free = self.cur_non_free
self.cur_non_free = -1
for pos, bit in enumerate(self.bitmask[old_non_free+1:], start=old_non_free+1):
if bit:
self.cur_non_free = pos
break
return self.read(old_non_free), old_non_free
def read(self, pos):
return self.fmt.unpack_from(self.page._buffer, self.data_offset + pos*self.fmt.size)
def write_all(self, data):
for pos, record in enumerate(data):
self.write(record, pos)
def write(self, data, pos=None):
if pos is None:
pos = self.next_free()
if Memman.page_size - (self.data_offset + pos*self.fmt.size) < self.fmt.size:
raise MemoryError #todo make normal exception
self.bitmask[pos] = True
self.page._buffer[8:self.data_offset] = self.bitmask.tobytes()
self.fmt.pack_into( (self.page._buffer), self.data_offset + pos*self.fmt.size, *data)
self.page.clean = False
#return pos
def delete(self, pos):
self.bitmask[pos] = False
self.page._buffer[8:self.data_offset] = self.bitmask.tobytes()
self.page.clean = False
class Memman:
page_size = 4096
max_pages = 20
def __init__(self, path):
self.pages = {}
self.page_usages = {}
try:
self.file = open(path, 'r+b')
except IOError:
tf = open(path, 'wb')
nothing = bytearray(Memman.page_size)
tf.write(nothing)
tf.close()
self.file = open(path, 'r+b')
self.file.seek(0, io.SEEK_END)
self.max_pages_in_file = self.file.tell() // Memman.page_size
self.file.seek(0, io.SEEK_SET)
self.deallocate_pages_list = DataPage(self.get_page(0), 'I')
def get_page(self, page_number=None):
        if page_number not in self.pages:
if len(self.pages) >= Memman.max_pages:
min_usages = -1
for page in self.pages:
if not self.pages[page].busy:
                        if self.pages[page].temp:
                            # evict a temporary page first; the block below returns it
                            # to the free list instead of writing it back
                            min_usages = page
                            break
if min_usages == -1:
min_usages = page
continue
if self.page_usages[min_usages] > self.page_usages[page]:
min_usages = page
if min_usages > -1:
if self.pages[min_usages].temp :
self.deallocate_page(min_usages)
else:
self.write_page(min_usages)
del self.pages[min_usages]
del self.page_usages[min_usages]
if page_number is None:
page_number = self.max_pages_in_file
self.max_pages_in_file += 1
self.file.seek(Memman.page_size * page_number)
buffer = bytearray(self.file.read(Memman.page_size))
if len(buffer) < Memman.page_size:
buffer = bytearray(Memman.page_size)
self.pages[page_number] = Page(buffer, page_number)
self.page_usages[page_number] = 0
self.page_usages[page_number] += 1
return self.pages[page_number]
def allocate_page(self):
try:
            record, pos = self.deallocate_pages_list.next()
            self.deallocate_pages_list.delete(pos)
            # next() yields the unpacked struct tuple; the page number is its only field
            return self.get_page(record[0])
except StopIteration:
return self.get_page()
def allocate_temp_page(self):
page = self.allocate_page()
page.temp = True
return page
def deallocate_page(self, page_number):
try:
            # DataPage.write() expects the struct fields as a sequence
            self.deallocate_pages_list.write((page_number,))
            return True
        except MemoryError:
            new_page = self.allocate_page()
            new_page.next_page = self.deallocate_pages_list.page.next_page
            new_page.prev_page = self.deallocate_pages_list.page.number
            self.deallocate_pages_list.page.next_page = new_page.number
            self.deallocate_pages_list.page.busy = False
            self.deallocate_pages_list.page = new_page
            self.deallocate_pages_list.reset()
            self.deallocate_pages_list.write((page_number,))
def write_page(self, page_number):
if not self.pages[page_number].clean:
self.file.seek(Memman.page_size * page_number)
self.file.write(self.pages[page_number].get_buffer())
def close(self):
for page in self.pages:
self.write_page(page)
self.file.close()
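# Minimal usage sketch (illustrative only; the file name is hypothetical): open a
# Memman-backed file, allocate a page, wrap it in a DataPage holding two-int records,
# write a couple of rows and read them back before flushing everything to disk.
def _example_memman_usage(path='example.db'):
    memman = Memman(path)
    page = memman.allocate_page()
    records = DataPage(page, 'ii')      # each record is two 4-byte signed integers
    records.write((1, 10))
    records.write((2, 20))
    rows = list(records)                # iterate over the occupied slots
    memman.close()
    return rows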
|
|
"""TestSuite"""
import sys
import unittest
import six
from unittest2 import case, util
__unittest = True
class BaseTestSuite(unittest.TestSuite):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
_cleanup = True
def __init__(self, tests=()):
self._tests = []
self._removed_tests = 0
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
# Can't guarantee hash invariant, so flag as unhashable
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = self._removed_tests
for test in self:
if test:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not hasattr(test, '__call__'):
raise TypeError("%r is not callable" % (repr(test),))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, six.string_types):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for index, test in enumerate(self):
if result.shouldStop:
break
test(result)
if self._cleanup:
self._removeTestAtIndex(index)
return result
def _removeTestAtIndex(self, index):
"""Stop holding a reference to the TestCase at index."""
try:
test = self._tests[index]
except TypeError:
            # support for suite implementations that have overridden self._tests
pass
else:
# Some unittest tests add non TestCase/TestSuite objects to
# the suite.
if hasattr(test, 'countTestCases'):
self._removed_tests += test.countTestCases()
self._tests[index] = None
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
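# Illustrative sketch only (not used by the library): builds a BaseTestSuite by hand,
# runs it against a plain unittest.TestResult, and inspects the outcome. The test case
# defined inside is hypothetical.
def _example_run_base_suite():
    class _DemoTest(unittest.TestCase):
        def test_passes(self):
            self.assertTrue(True)

    suite = BaseTestSuite([_DemoTest('test_passes')])
    result = unittest.TestResult()
    suite.run(result)
    return result.testsRun, result.wasSuccessful()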
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for index, test in enumerate(self):
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if not debug:
test(result)
else:
test.debug()
if self._cleanup:
self._removeTestAtIndex(index)
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
################################
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
try:
setUpClass()
except Exception:
e = sys.exc_info()[1]
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception:
e = sys.exc_info()[1]
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception:
e = sys.exc_info()[1]
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
try:
tearDownClass()
except Exception:
e = sys.exc_info()[1]
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module detects whether third-party libraries, utilized by third-party
drivers, are present on the system. If they are not, it mocks them and tinkers
with sys.modules so that the drivers can be loaded by unit tests, and the unit
tests can continue to test the functionality of those drivers without the
respective external libraries' actually being present.
Any external library required by a third-party driver should be mocked here.
Current list of mocked libraries:
- seamicroclient
- pyghmi (used by the ipminative driver)
- proliantutils
- pysnmp
- scciclient
- oneview_client
- pywsman
- iboot
- pyremotevbox
- ironic_inspector_client
- UcsSdk
- ImcSdk
"""
import sys
import mock
from oslo_utils import importutils
import six
from ironic.drivers.modules import ipmitool
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
# attempt to load the external 'seamicroclient' library, which is
# required by the optional drivers.modules.seamicro module
seamicroclient = importutils.try_import("seamicroclient")
if not seamicroclient:
smc = mock.MagicMock(spec_set=mock_specs.SEAMICRO_SPEC)
smc.client = mock.MagicMock(spec_set=mock_specs.SEAMICRO_CLIENT_MOD_SPEC)
smc.exceptions = mock.MagicMock(spec_set=mock_specs.SEAMICRO_EXC_SPEC)
smc.exceptions.ClientException = Exception
smc.exceptions.UnsupportedVersion = Exception
sys.modules['seamicroclient'] = smc
sys.modules['seamicroclient.client'] = smc.client
sys.modules['seamicroclient.exceptions'] = smc.exceptions
# if anything has loaded the seamicro driver yet, reload it now that
# the external library has been mocked
if 'ironic.drivers.modules.seamicro' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.seamicro'])
# IPMITool driver checks the system for presence of 'ipmitool' binary during
# __init__. We bypass that check in order to run the unit tests, which do not
# depend on 'ipmitool' being on the system.
ipmitool.TIMING_SUPPORT = False
ipmitool.DUAL_BRIDGE_SUPPORT = False
ipmitool.SINGLE_BRIDGE_SUPPORT = False
pyghmi = importutils.try_import("pyghmi")
if not pyghmi:
p = mock.MagicMock(spec_set=mock_specs.PYGHMI_SPEC)
p.exceptions = mock.MagicMock(spec_set=mock_specs.PYGHMI_EXC_SPEC)
p.exceptions.IpmiException = Exception
p.ipmi = mock.MagicMock(spec_set=mock_specs.PYGHMI_IPMI_SPEC)
p.ipmi.command = mock.MagicMock(spec_set=mock_specs.PYGHMI_IPMICMD_SPEC)
p.ipmi.command.Command = mock.MagicMock(spec_set=[])
sys.modules['pyghmi'] = p
sys.modules['pyghmi.exceptions'] = p.exceptions
sys.modules['pyghmi.ipmi'] = p.ipmi
sys.modules['pyghmi.ipmi.command'] = p.ipmi.command
# FIXME(deva): the next line is a hack, because several unit tests
# actually depend on this particular string being present
# in pyghmi.ipmi.command.boot_devices
p.ipmi.command.boot_devices = {'pxe': 4}
if 'ironic.drivers.modules.ipminative' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.ipminative'])
proliantutils = importutils.try_import('proliantutils')
if not proliantutils:
proliantutils = mock.MagicMock(spec_set=mock_specs.PROLIANTUTILS_SPEC)
sys.modules['proliantutils'] = proliantutils
sys.modules['proliantutils.ilo'] = proliantutils.ilo
sys.modules['proliantutils.ilo.client'] = proliantutils.ilo.client
sys.modules['proliantutils.exception'] = proliantutils.exception
proliantutils.exception.IloError = type('IloError', (Exception,), {})
command_exception = type('IloCommandNotSupportedError', (Exception,), {})
proliantutils.exception.IloCommandNotSupportedError = command_exception
if 'ironic.drivers.ilo' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.ilo'])
oneview_client = importutils.try_import('oneview_client')
if not oneview_client:
oneview_client = mock.MagicMock(spec_set=mock_specs.ONEVIEWCLIENT_SPEC)
sys.modules['oneview_client'] = oneview_client
sys.modules['oneview_client.client'] = oneview_client.client
sys.modules['oneview_client.client.Client'] = mock.MagicMock(
spec_set=mock_specs.ONEVIEWCLIENT_CLIENT_CLS_SPEC
)
states = mock.MagicMock(
spec_set=mock_specs.ONEVIEWCLIENT_STATES_SPEC,
ONEVIEW_POWER_OFF='Off',
ONEVIEW_POWERING_OFF='PoweringOff',
ONEVIEW_POWER_ON='On',
ONEVIEW_POWERING_ON='PoweringOn',
ONEVIEW_RESETTING='Resetting',
ONEVIEW_ERROR='error')
sys.modules['oneview_client.states'] = states
sys.modules['oneview_client.exceptions'] = oneview_client.exceptions
oneview_client.exceptions.OneViewException = type('OneViewException',
(Exception,), {})
if 'ironic.drivers.modules.oneview' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.oneview'])
# attempt to load the external 'pywsman' library, which is required by
# the optional drivers.modules.drac and drivers.modules.amt module
pywsman = importutils.try_import('pywsman')
if not pywsman:
pywsman = mock.MagicMock(spec_set=mock_specs.PYWSMAN_SPEC)
sys.modules['pywsman'] = pywsman
# Now that the external library has been mocked, if anything had already
# loaded any of the drivers, reload them.
if 'ironic.drivers.modules.drac' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.drac'])
if 'ironic.drivers.modules.amt' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.amt'])
# attempt to load the external 'iboot' library, which is required by
# the optional drivers.modules.iboot module
iboot = importutils.try_import("iboot")
if not iboot:
ib = mock.MagicMock(spec_set=mock_specs.IBOOT_SPEC)
ib.iBootInterface = mock.MagicMock(spec_set=[])
sys.modules['iboot'] = ib
# if anything has loaded the iboot driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.iboot' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.iboot'])
# attempt to load the external 'pysnmp' library, which is required by
# the optional drivers.modules.snmp module
pysnmp = importutils.try_import("pysnmp")
if not pysnmp:
pysnmp = mock.MagicMock(spec_set=mock_specs.PYWSNMP_SPEC)
sys.modules["pysnmp"] = pysnmp
sys.modules["pysnmp.entity"] = pysnmp.entity
sys.modules["pysnmp.entity.rfc3413"] = pysnmp.entity.rfc3413
sys.modules["pysnmp.entity.rfc3413.oneliner"] = (
pysnmp.entity.rfc3413.oneliner)
sys.modules["pysnmp.entity.rfc3413.oneliner.cmdgen"] = (
pysnmp.entity.rfc3413.oneliner.cmdgen)
sys.modules["pysnmp.error"] = pysnmp.error
pysnmp.error.PySnmpError = Exception
sys.modules["pysnmp.proto"] = pysnmp.proto
sys.modules["pysnmp.proto.rfc1902"] = pysnmp.proto.rfc1902
# Patch the RFC1902 integer class with a python int
pysnmp.proto.rfc1902.Integer = int
# if anything has loaded the snmp driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.snmp' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.snmp'])
# attempt to load the external 'scciclient' library, which is required by
# the optional drivers.modules.irmc module
scciclient = importutils.try_import('scciclient')
if not scciclient:
mock_scciclient = mock.MagicMock(spec_set=mock_specs.SCCICLIENT_SPEC)
sys.modules['scciclient'] = mock_scciclient
sys.modules['scciclient.irmc'] = mock_scciclient.irmc
sys.modules['scciclient.irmc.scci'] = mock.MagicMock(
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC,
POWER_OFF=mock.sentinel.POWER_OFF,
POWER_ON=mock.sentinel.POWER_ON,
POWER_RESET=mock.sentinel.POWER_RESET,
MOUNT_CD=mock.sentinel.MOUNT_CD,
UNMOUNT_CD=mock.sentinel.UNMOUNT_CD,
MOUNT_FD=mock.sentinel.MOUNT_FD,
UNMOUNT_FD=mock.sentinel.UNMOUNT_FD)
# if anything has loaded the iRMC driver yet, reload it now that the
# external library has been mocked
if 'ironic.drivers.modules.irmc' in sys.modules:
six.moves.reload_module(sys.modules['ironic.drivers.modules.irmc'])
# install mock object to prevent 'iscsi_irmc' and 'agent_irmc' from
# checking whether NFS/CIFS share file system is mounted or not.
irmc_deploy = importutils.import_module(
'ironic.drivers.modules.irmc.deploy')
irmc_deploy._check_share_fs_mounted_orig = irmc_deploy._check_share_fs_mounted
irmc_deploy._check_share_fs_mounted_patcher = mock.patch(
'ironic.drivers.modules.irmc.deploy._check_share_fs_mounted')
irmc_deploy._check_share_fs_mounted_patcher.return_value = None
pyremotevbox = importutils.try_import('pyremotevbox')
if not pyremotevbox:
pyremotevbox = mock.MagicMock(spec_set=mock_specs.PYREMOTEVBOX_SPEC)
pyremotevbox.exception = mock.MagicMock(
spec_set=mock_specs.PYREMOTEVBOX_EXC_SPEC)
pyremotevbox.exception.PyRemoteVBoxException = Exception
pyremotevbox.exception.VmInWrongPowerState = Exception
pyremotevbox.vbox = mock.MagicMock(
spec_set=mock_specs.PYREMOTEVBOX_VBOX_SPEC)
sys.modules['pyremotevbox'] = pyremotevbox
if 'ironic.drivers.modules.virtualbox' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.virtualbox'])
ironic_inspector_client = importutils.try_import('ironic_inspector_client')
if not ironic_inspector_client:
ironic_inspector_client = mock.MagicMock(
spec_set=mock_specs.IRONIC_INSPECTOR_CLIENT_SPEC)
sys.modules['ironic_inspector_client'] = ironic_inspector_client
if 'ironic.drivers.modules.inspector' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.inspector'])
class MockKwargsException(Exception):
def __init__(self, *args, **kwargs):
super(MockKwargsException, self).__init__(*args)
self.kwargs = kwargs
ucssdk = importutils.try_import('UcsSdk')
if not ucssdk:
ucssdk = mock.MagicMock()
sys.modules['UcsSdk'] = ucssdk
sys.modules['UcsSdk.utils'] = ucssdk.utils
sys.modules['UcsSdk.utils.power'] = ucssdk.utils.power
sys.modules['UcsSdk.utils.management'] = ucssdk.utils.management
sys.modules['UcsSdk.utils.exception'] = ucssdk.utils.exception
ucssdk.utils.exception.UcsOperationError = (
type('UcsOperationError', (MockKwargsException,), {}))
ucssdk.utils.exception.UcsConnectionError = (
type('UcsConnectionError', (MockKwargsException,), {}))
if 'ironic.drivers.modules.ucs' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.ucs'])
imcsdk = importutils.try_import('ImcSdk')
if not imcsdk:
imcsdk = mock.MagicMock()
imcsdk.ImcException = Exception
sys.modules['ImcSdk'] = imcsdk
if 'ironic.drivers.modules.cimc' in sys.modules:
six.moves.reload_module(
sys.modules['ironic.drivers.modules.cimc'])
|
|
#!/usr/bin/env python
#Script for analyzing the PBS accounting files
#Print statistics for users, queues, nodes, or jobs
#Print a sorted list of user statistics
#See usage summary
import numpy as np
import pylab as plt
import sys
from optparse import OptionParser
def ec2str(exitCode):
#Returns a descriptive string from an exitCode
if exitCode == 0:
return 'Job Success'
elif exitCode == -11:
return 'JOB_EXEC_RERUN: Job was rerun'
elif exitCode == -10:
return 'JOB_EXEC_FAILUID: Invalid UID/GID for job'
elif exitCode == -4:
return 'JOB_EXEC_INITABT : Job aborted on MOM initialization'
elif exitCode == -3:
return 'JOB_EXEC_RETRY: job execution failed, do retry'
    elif exitCode == -2:
return 'JOB_EXEC_FAIL2 : Job exec failed, after files, no retry'
elif exitCode == -1:
return 'JOB_EXEC_FAIL1 : Job exec failed, before files, no retry'
elif exitCode == 1:
return 'General Error'
elif 2 <= exitCode <= 127:
return 'Exit value of last command in jobscript'
elif exitCode == 128:
return 'Invalid argument to exit()'
elif exitCode == 131:
return 'SIGQUIT: ctrl-\, core dumped'
elif exitCode == 132:
        return 'SIGILL: Malformed, unknown, or privileged instruction'
elif exitCode == 133:
return 'SIGTRAP: Debugger breakpoint'
elif exitCode == 134:
return 'SIGABRT: Process itself called abort'
elif exitCode == 135:
return 'SIGEMT: Emulator trap'
elif exitCode == 136:
return 'SIGFPE: Bad arithmetic operation (e.g. division by zero)'
elif exitCode == 137:
return 'SIGKILL (e.g. kill -9 command)'
    elif exitCode == 139:
return 'SIGSEGV: Segmentation Fault'
elif exitCode == 143:
return 'SIGTERM (probably not canceljob or oom)'
elif exitCode == 151:
return 'SIGIO: Possible I/O error'
elif exitCode == 152:
return 'SIGXCPU: predetermined CPU time used up'
elif exitCode == 153:
return 'SIGXFSZ: File larger than maximum size'
elif 174 <= exitCode <= 253:
return 'Fatal error signal ' + str(exitCode-128)
elif exitCode == 254:
return 'Command invoked cannot execute'
elif exitCode == 255:
return 'command not found, possible path problem'
elif exitCode == 265:
return 'SIGKILL (e.g. kill -9 command)'
elif exitCode == 271:
return 'SIGTERM (e.g. canceljob or oom)'
else:
return 'Unknown Error'
def str2secs(time):
"""
Convert a string of the form HH:MM:SS into a duration in seconds
"""
H, M, S = time.split(':')
return 3600.0*float(H) + 60.0*float(M) + float(S)
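#For example (quick sanity checks of the conversion above):
#    str2secs("01:30:00") == 5400.0
#    str2secs("00:00:45") == 45.0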
def main():
"""
this is the main routine.
"""
#Parse Command-line options
usage = "usage: %prog [options] [Accounting Files] \n\
example: %prog -u class01 -u class02 2013*\n\
example: %prog -s -p jan2013 201301*\n\
example: %prog -q sw 20130101"
parser = OptionParser(usage=usage)
parser.add_option("-u", "--user", dest="user", action="append",\
type="string", help="Print stats for user")
parser.add_option("-j", "--job", dest="jobID", action="append",\
type="int", help="Print stats for a job")
parser.add_option("-q", "--queue", dest="queue", action="append",\
type="string", help="Pring stats for a queue")
parser.add_option("-n", "--node", dest="node", action="append",\
type="string", help="Print stats for a node")
parser.add_option("-p", "--plots", dest="figfile", action="store", \
type="string", help="Plot figures")
parser.add_option("-s", "--sortedlist", dest="list", \
action="store_true", default=False,\
help="Print a sorted list of users")
(options, args) = parser.parse_args()
if options.user == None and options.jobID == None and \
options.queue == None and options.node == None and \
options.figfile == None:
options.list = True
filters = None
if len(args) < 1:
sys.exit(usage + '\n \
--help for list of options')
else:
joblist = args
attributes = {}
items = list() #users and queues to collect statistics for
filters = list() #Filters to apply when calling alljobs()
if options.user != None:
items += options.user
filters += options.user
for u in options.user:
attributes[u] = 'user'
if options.queue != None:
items += options.queue
filters += options.queue
for q in options.queue:
attributes[q] = 'queue'
if options.node != None:
filters += options.node
if options.jobID != None:
        filters += [str(j) for j in options.jobID]
#get list of all jobs
if len(filters) == 0:
filters = None
jobs = alljobs(joblist, filters)
if len(attributes) > 0:
itemStats(items, jobs, attributes)
#Print job info
if options.jobID != None:
jobStats(options.jobID, jobs)
#Print node stats
if options.node != None:
for n in options.node:
nodeStats(n, jobs)
#Make plots
if options.figfile != None:
makePlots(options.figfile, jobs, scatterPlots=True)
#Sort all users and print list
if options.list:
userEffList = list()
users = {}
#Add each job to the users dictionary
for job in jobs:
if job.user not in users:
users[job.user] = userClass(job)
else:
users[job.user].joblist.append(job)
#Build a list of user statistics
for usr in users:
userEffList.append([users[usr].sortMetric(), \
users[usr].avgEfficiency(), \
users[usr].avgMemFraction(), \
users[usr].ttlWalltime()/3600.0, \
users[usr].name])
userEffList.sort(reverse=True)
#Print User statistics
for usr in userEffList:
print(usr[1], usr[2], usr[3], usr[4])
#Print detailed stats for "top 10" users in sorted list
for usr in userEffList[:10]:
users[usr[4]].printStats()
def itemStats(items, jobs, attributes):
"""Prints all stats for items of type attribute[item] in jobs list"""
itemdic = {}
found = {}
#Initialize founds to false
for i in items:
found[i] = False
for job in jobs:
for item in items:
#Make sure attribute type is reasonable
assert attributes[item] in ['queue', 'user']
#If job attribute matches item
if item == getattr(job, attributes[item]):
#If first time finding item in jobs
if not found[item]:
#Initialize the appropriate item type
if 'user' in attributes[item]:
itemdic[item] = userClass(job)
elif 'queue' in attributes[item]:
itemdic[item] = queueClass(job)
#Set found to true
found[item] = True
else:
itemdic[item].addJob(job)
for it in items:
if found[it]:
itemdic[it].printStats()
else:
print(attributes[it] + " " + it + " not found in joblist")
return
def jobStats(jobIDs, jobs):
"""Prints all stats for a particular job"""
found = {}
for jobID in jobIDs:
found[jobID] = False
for job in jobs:
for jobID in jobIDs:
if jobID == job.id:
job.printStats()
found[jobID] = True
break
if found[jobID]:
break
for jobID in jobIDs:
if not found[jobID]:
print("Job " + str(jobID) + " not found in joblist.")
return
def nodeStats(node, jobs):
"""Prints all stats for a particular node"""
nde = None
found = False
for job in jobs:
if any(node in n for n in job.nodes):
if nde == None:
nde = nodeClass(job, node)
found = True
else:
nde.addJob(job)
if found:
nde.printStats()
else:
print("node " + node + " not found in joblist")
return
def alljobs(files, filters=None):
"""
routine to read the accounting logs returning a dictionary of jobs
"""
alljobs = list()
walltimesreq = {}
for file in files:
try:
f = open(file, 'r').readlines()
except IOError:
sys.exit('IO Error: File ' + file + ' not found')
for rown in f:
#When filters are used, lines not containing the filter
# words are skipped
if filters != None:
if not any(filt in rown for filt in filters):
continue
row = rown.split()
cores = 1
ppn = 0
gpus = 0
mics = 0
            #Extract the number of cores from the accounting files
if any('Resource_List.walltime' in s for s in row):
id = row[1].split(';')[2].split('.')[0]
for col in row:
if 'Resource_List.walltime=' in col:
wtreq = col.split('=')[-1]
walltimesreq[id] = str2secs(wtreq)
if any('resources_used' in s for s in row):
id = row[1].split(';')[2].split('.')[0]
date = row[0]
time = row[1].split(';')[0]
account = 'unknown'
for col in row:
if 'user=' in col:
user = col.split('=')[-1]
elif 'queue=' in col:
queue = col.split('=')[-1]
elif 'cput=' in col:
cput = col.split('=')[-1]
elif 'used.mem=' in col:
mem = col.split('=')[-1]
elif 'used.vmem=' in col:
vmem = col.split('=')[-1]
elif 'resources_used.walltime=' in col:
walltime = col.split('=')[-1]
elif 'Resource_List.procs=' in col:
cores = col.split('=')[-1]
elif 'Exit_status=' in col:
exitcode = col.split('=')[-1]
elif 'account' in col:
account = col.split('=')[-1]
elif 'jobname' in col:
jobname = col.split('=')[-1]
elif 'qtime' in col:
qtime = col.split('=')[-1]
elif 'etime' in col:
etime = col.split('=')[-1]
elif 'ctime' in col:
ctime = col.split('=')[-1]
elif 'start' in col:
start = col.split('=')[-1]
elif 'end' in col:
end = col.split('=')[-1]
elif 'exec_host' in col:
nodes = col.split('=')[-1].split('+')
nodes = list(set(\
[node.split('/')[0] for node in nodes]\
))
elif 'Resource_List.nodes=' in col and '-' not in col:
col2 = col.split(':')
if len(col2) > 1:
# cores = int(col2[0].split('=')[-1])* \
# int(col2[1].split('=')[-1])
# ppn = int(col2[1].split('=')[-1])
#Todo: Allow the case where both mics and GPUs are used in same job
if len(col2) > 2:
if 'gpus' in col2[2]:
gpus = int(col2[2].split('=')[-1])
elif 'mics' in col2[2]:
mics = int(col2[2].split('=')[-1])
else:
cores = col2[0].split('=')[-1]
ppn = 0
mics = 0
gpus = 0
try:
tiq = int(start) - int(qtime)
tie = int(start) - int(etime)
except(ValueError):
tiq = 0
tie = 0
if id in walltimesreq:
wtreq = walltimesreq[id]
else:
wtreq = -1
# Added to test KC's method
if rown.find('exec_host=') > -1:
list_hostname=[]
for col in row:
if 'exec_host=' in col:
for hostname_exec in col.split("=")[1].split('+'):
list_hostname.append(hostname_exec.split('/')[0])
nb_cores_value=len(list_hostname)
# if int(nb_cores_value) != int(cores):
cores = nb_cores_value
#print("nodeserror: " + str(id) + " " + str(nb_cores_value) + " " + str(cores))
# End KC's method
alljobs.append(jobClass(id, user, date, time, queue, \
cput, mem, nodes, account, \
jobname, vmem, tiq, tie,\
walltime, cores, ppn, gpus, \
mics, exitcode, wtreq, \
ctime, etime, qtime, start, end))
return alljobs
def makePlots(filename, jobs, scatterPlots=False):
"""Creates plots of the job data"""
efficiencies = list()
memUnused = list()
cores = list()
walltime = list()
coreHours = list()
efficienciesCH = list()
if scatterPlots:
memUnused2d = list()
cores2d = list()
walltime2d = list()
coreHours2d = list()
for job in jobs:
memUnused.append(job.memUnusedFraction()*100.0)
cores.append(job.cores)
coreHours.append(job.walltime/3600.0*job.cores)
walltime.append(job.walltime/3600.0)
if 0.0 < job.efficiency() < 2.0:
efficiencies.append(job.efficiency()*100.0)
if scatterPlots:
memUnused2d.append(memUnused[-1])
if job.walltime/3600.0 < 400.0:
coreHours2d.append(coreHours[-1])
efficienciesCH.append(efficiencies[-1])
if scatterPlots:
plt.clf()
plt.hexbin(efficiencies, memUnused2d, bins='log', gridsize=1000)
plt.xlabel('Efficiency (%)')
plt.xlim(0.0, 110.0)
plt.ylabel('Unused Memory (%)')
plt.ylim(0.0, 100.0)
plt.savefig(filename + '.memVsE.png')
plt.clf()
plt.hexbin(efficienciesCH, coreHours2d, bins='log', \
gridsize=(200, 2000))
plt.xlabel('Efficiency (%)')
plt.xlim(0.0, 110.0)
plt.ylabel('Walltime x Cores (core hours)')
plt.ylim(0.0, 400.0)
plt.savefig(filename + '.coreHoursVsE.png')
plt.clf()
plt.cla()
plt.hist(efficiencies, bins=1000, log=True, color='k')
plt.xlabel('Efficiencies (%)')
plt.xlim(0.0, 150.0)
plt.savefig(filename + '.efficiencies.png')
plt.cla()
plt.hist(memUnused, bins=1000, log=True, color='k')
plt.xlabel('Unused Memory (%)')
plt.xlim(0.0, 110.0)
plt.savefig(filename + '.memUnused.png')
plt.cla()
plt.hist(cores, bins=max(cores), log=True, color='k')
plt.xlabel('Number of cores')
plt.xlim(0, 100)
plt.savefig(filename + '.cores.png')
plt.cla()
plt.hist(walltime, bins=1000, log=True, color='k')
plt.xlim(0, 240.0)
plt.xlabel('Walltime (hours)')
plt.savefig(filename + '.walltime.png')
plt.cla()
plt.hist(coreHours, bins=1000, log=True, color='k')
plt.xlim(0, 1000.0)
plt.xlabel('Walltime x Cores (core hours)')
plt.savefig(filename + '.corehours.png')
return
class jobGroup():
"""
A class to hold groups of jobs
"""
def __init__(self, job):
self.joblist = list()
self.joblist.append(job)
def avgEfficiency(self):
"""Average efficiency of user's jobs"""
numJobs = len(self.joblist)
sumEfficiencies = 0.0
for job in self.joblist:
sumEfficiencies += job.efficiency()
return sumEfficiencies / float(numJobs)
def avgMem(self):
"""Average memory of user's jobs"""
numJobs = len(self.joblist)
sumMem = 0.0
for job in self.joblist:
sumMem += job.mem/job.cores
return sumMem / float(numJobs)
def avgMemFraction(self):
"""Average memory use fraction"""
sumMem = 0.0
for job in self.joblist:
sumMem += 1.0 - job.memUnusedFraction()
return sumMem / float(len(self.joblist))
def ttlWalltime(self):
"""Total walltime*cores"""
ttlwt = 0.0
for job in self.joblist:
ttlwt += job.walltime*float(job.cores)
return ttlwt
def gpuHours(self):
"""Total GPU hours (gpus + mics)"""
gpuh = 0.0
for job in self.joblist:
gpuh += job.walltime*(float(job.gpus) + float(job.mics))
return gpuh
def avgQtime(self):
"""Average time in queue"""
avgqt = 0.0
for job in self.joblist:
avgqt += job.tiq
avgqt /= len(self.joblist)
return avgqt
def minQtime(self):
"""Shortest time in queue"""
return min([job.tiq for job in self.joblist])
def maxQtime(self):
"""Longest time in queue"""
return max([job.tiq for job in self.joblist])
def addJob(self, job):
"""Append a job to the joblist"""
self.joblist.append(job)
def printJobs(self, nJobs):
"""Print the last nJobs jobs added to user's joblist"""
for job in self.joblist[-1*nJobs:]:
print(job.id)
def printAllJobs(self):
"""Print stats for all jobs in group"""
for job in self.joblist:
job.printStats()
def badExitJobs(self):
"""Create a list of jobs with non-zero exit codes"""
badExits = list()
for job in self.joblist:
if job.exitcode != 0:
badExits.append(job)
return badExits
def fractionBad(self):
return float(len(self.badExitJobs()))\
/float(len(self.joblist))
def superEfficientJobs(self):
effJobs = list()
for job in self.joblist:
if job.isSuperEfficient():
effJobs.append(job)
return effJobs
def fractionSuperEff(self):
return float(len(self.superEfficientJobs()))\
/float(len(self.joblist))
def printStats(self):
"""Print detailed human readable statistics for user"""
#avgCores is the average number of cores used
avgCores = 0.0
for job in self.joblist:
avgCores += job.cores
avgCores /= len(self.joblist)
print('Number of jobs: ' + str(len(self.joblist)))
print('Average core hours: ' + \
str(self.ttlWalltime()/len(self.joblist)/3600.0) \
+ ' core hours')
print('Total core hours: ' + \
str(self.ttlWalltime()/3600.0) + ' core hours')
print('Total GPU hours (gpus + mics): ' + \
str(self.gpuHours()/3600.0) + ' gpu hours')
print('Average Queuetime: ' + \
str(self.avgQtime()/3600.0) + ' hours')
print('Average efficiency: ' + \
str(self.avgEfficiency()*100.0)\
+ '%')
print('Average Number of Cores: ' + \
str(avgCores))
print('Average Memory per core: ' + \
str(self.avgMem()/1048576.0) \
+ ' GB')
def printBad(self):
print('Jobs with Bad Exits (' + \
str(len(self.badExitJobs())) + \
') (' + str(self.fractionBad()*100.) + \
'%):')
for job in self.badExitJobs()[:10]:
print(str(job.id) + ' exit code: ' \
+ str(job.exitcode) + ': ' + ec2str(job.exitcode))
def printSuperEff(self):
print('Super-efficient jobs (' + \
str(len(self.superEfficientJobs())) + \
') (' + str(self.fractionSuperEff()*100.) + \
'%):')
for job in self.superEfficientJobs()[:10]:
print(str(job.id) + ' efficiency: ' \
+ str(job.efficiency()))
def printTopProp(self, prop, n=5):
"""Prints the most frequent n results of a particular job list member.
For example, the most frequent users or the most frequent error codes"""
propdic = {}
for job in self.joblist:
if prop == 'node':
attr = getattr(job, 'nodes')
for node in attr:
if node not in propdic:
propdic[node] = 1
else:
propdic[node] += 1
else:
attr = getattr(job, prop)
if attr not in propdic:
propdic[attr] = 1
else:
propdic[attr] += 1
sortedProps = sorted(propdic.items(), reverse=True, key=lambda item: item[1])
for sp in sortedProps[:n]:
if prop == 'exitcode':
print(str(sp[0]) + ' (' + str(sp[1]) + '): ' + ec2str(sp[0]))
else:
print(str(sp[0]) + ' (' + str(sp[1]) + ', ' + \
str(100.0*sp[1]/len(self.joblist)) + '%)')
class queueClass(jobGroup):
"""
A class to hold queue information
"""
def __init__(self, job):
self.queue = job.queue
jobGroup.__init__(self, job)
def addJob(self, job):
assert self.queue == job.queue, \
"Error: queue mismatch constructing queue"
assert any(job.id != j.id for j in self.joblist), \
"Error: job %d already added to queue" % job.id
jobGroup.addJob(self, job)
def printStats(self):
"""Print detailed human readable statistics for a queue"""
#avgCores is the average number of cores used
print('******************************')
print('Queue: ' + self.queue)
jobGroup.printStats(self)
#print('Some jobs: ')
#self.printJobs(5)
print('Top users:')
self.printTopProp('user')
print('Most frequently requested core counts:')
jobGroup.printTopProp(self, 'cores')
print('Most frequent exit codes:')
self.printTopProp('exitcode')
# jobGroup.printBad(self)
jobGroup.printSuperEff(self)
print('******************************')
class nodeClass(jobGroup):
"""
A class to hold node information
"""
def __init__(self, job, node):
self.node = node
jobGroup.__init__(self, job)
def addJob(self, job):
assert any(self.node in n for n in job.nodes), \
"Error: node mismatch constructing node"
assert any(job.id != j.id for j in self.joblist), \
"Error: job %d already added to queue" % job.id
jobGroup.addJob(self, job)
def printStats(self):
"""Print detailed human readable statistics for a node"""
print('******************************')
print('Node: ' + self.node)
jobGroup.printStats(self)
print('Top users:')
jobGroup.printTopProp(self, 'user')
print('Most frequently requested core counts:')
jobGroup.printTopProp(self, 'cores')
print('Most frequent exit codes:')
jobGroup.printTopProp(self, 'exitcode')
#print('Some jobs: ')
#self.printJobs(5)
#jobGroup.printBad(self)
jobGroup.printSuperEff(self)
print('******************************')
class userClass(jobGroup):
"""
A class to hold user information
"""
def __init__(self, job):
self.name = job.user
jobGroup.__init__(self, job)
def addJob(self, job):
assert self.name == job.user, \
"Error: user mismatch constructing user"
assert any(job.id != j.id for j in self.joblist), \
"Error: job %d already added to user" % job.id
jobGroup.addJob(self, job)
def sortMetric(self):
"""Metric used for sorting users"""
metric = 0.0
for job in self.joblist:
if 0.0 < job.efficiency() < 1.0:
x = job.efficiency()/(1.0-job.efficiency())
else:
x = 1.0e24
if 0.0 < job.memUnusedFraction() < 1.0:
y = (1.0-job.memUnusedFraction())/job.memUnusedFraction()
else:
y = 1.0e24
metric += np.exp(-x)*np.exp(-y)*job.cores*job.walltime
return metric
def printStats(self):
"""Print detailed human readable statistics for user"""
#nq is a dictionary mapping queues used to how many jobs
nq = {}
#avgCores is the average number of cores used
for job in self.joblist:
if job.queue in nq:
nq[job.queue] += 1
else:
nq[job.queue] = 1
print('******************************')
print('User: ' + self.name)
jobGroup.printStats(self)
print('Queues used (number of jobs):')
for q in nq:
print(str(q) + ' (' + str(nq[q]) + ')')
print('Most frequent exit codes:')
jobGroup.printTopProp(self, 'exitcode')
print('Most frequently requested core counts:')
jobGroup.printTopProp(self, 'cores')
#print('Some jobs: ')
#self.printJobs(5)
#self.printBad()
self.printSuperEff()
print('******************************')
class jobClass():
"""
A class to hold PBS job statistics
"""
def __init__(self, id, user, date, time, queue, cput, mem, \
nodes, account, jobname,\
vmem, tiq, tie, walltime, cores, \
ppn, gpus, mics, exitcode, walltimereq, \
ctime, etime, qtime, start, end):
        #all fields arrive as strings; cast them to numeric types where possible
try:
self.id = int(id)
except(ValueError):
self.id = np.nan
self.user = user
self.queue = queue
self.date = date
self.time = time
self.nodes = nodes
self.account = account
self.jobname = jobname
try:
self.cput = str2secs(cput)
except(ValueError):
self.cput = np.nan
try:
self.mem = float(mem[0:-2])
except(ValueError):
self.mem = np.nan
try:
self.vmem = float(vmem[0:-2])
except(ValueError):
self.vmem = np.nan
try:
self.tiq = int(tiq)
except(ValueError):
self.tiq = 0
try:
self.tie = int(tie)
except(ValueError):
self.tie = 0
try:
self.walltime = str2secs(walltime)
self.walltime = max(0.1, self.walltime)
except(ValueError):
self.walltime = np.nan
try:
self.cores = int(cores)
except(ValueError):
self.cores = 1
try:
self.ppn = int(ppn)
except(ValueError):
self.ppn = 0
try:
self.gpus = int(gpus)
except(ValueError):
self.gpus = 0
try:
self.mics = int(mics)
except(ValueError):
self.mics = 0
try:
self.exitcode = int(exitcode)
except(ValueError):
self.exitcode = -100
try:
self.walltimereq = int(walltimereq)
except(ValueError):
self.walltimereq = -1
try:
self.ctime = int(ctime)
except(ValueError):
self.ctime = -1
try:
self.etime = int(etime)
except(ValueError):
self.etime = -1
try:
self.qtime = int(qtime)
except(ValueError):
self.qtime = -1
try:
self.start = int(start)
except(ValueError):
self.start = -1
try:
self.end = int(end)
except(ValueError):
self.end = -1
self.queue = queue
def efficiency(self):
"""The CPU usage efficiency of the job"""
return self.cput / (self.walltime * float(self.cores))
def memUnused(self):
""" Unused memory in GB """
memUsed = self.mem / 1048576.0
memPerCore = {'hb':1.7, 'lm':5.7, 'scalemp':8.0, \
'lmgpu':5.7, 'sw':2.7}
if self.queue in memPerCore:
memTotal = memPerCore[self.queue]*self.cores
else:
memTotal = 3.0*self.cores
return (memTotal - memUsed)
def memUnusedFraction(self):
memPerCore = {'hb':1.7, 'lm':5.7, 'scalemp':8.0, \
'lmgpu':5.7, 'sw':2.7}
if self.queue in memPerCore:
return self.memUnused()/(memPerCore[self.queue]*self.cores)
else:
return self.memUnused()/(3.0*self.cores)
def isSuperEfficient(self):
"""Returns true if the job has a strange efficiency result"""
prob = False
if self.cput < 0:
prob = True
elif self.cput > self.walltime*self.cores:
prob = True
return prob
def wasRunningAt(self, time):
return self.start < time < self.end
def printStats(self):
"""Print human readable stats for the job"""
print('******************************')
print('Job name: ' + str(self.jobname))
print('JobID: ' + str(self.id))
print('User: ' + str(self.user))
print('queue: ' + str(self.queue))
print('Completed: ' + str(self.date) + ' ' + str(self.time))
print('qtime in Queue: ' + str(self.tiq/3600.0) + ' hours')
print('etime in Queue: ' + str(self.tie/3600.0) + ' hours')
print('cores: ' + str(self.cores))
print('walltime: ' + str(self.walltime/3600.0) + ' hours')
print('walltime requested: ' + str(self.walltimereq/3600.0) + ' hours')
print('cpu time: ' + str(self.cput/3600.0) + ' hours')
print('efficiency: ' + str(self.efficiency() * 100.0) + ' %')
print('mem: ' + str(self.mem / 1048576.0) + ' GB')
print('vmem: ' + str(self.vmem / 1048576.0) + ' GB')
print('exitcode: ' + str(self.exitcode) + ': ' + ec2str(self.exitcode))
print('nodes: ' + str(self.nodes))
print('******************************')
return
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""
Default Controllers
@author: Fran Boon
"""
module = "default"
# Options Menu (available in all Functions)
response.menu_options = [
#[T("About Sahana"), False, URL(r=request, f="about")],
]
def call():
"Call an XMLRPC, JSONRPC or RSS service"
# If webservices don't use sessions, avoid cluttering up the storage
#session.forget()
return service()
def download():
"Download a file"
return response.download(request, db)
# Add newly-registered users to Person Registry & 'Authenticated' role
auth.settings.register_onaccept = lambda form: auth.shn_register(form)
_table_user = auth.settings.table_user
_table_user.first_name.label = T("First Name")
_table_user.last_name.label = T("Last Name")
_table_user.last_name.comment = SPAN("*", _class="req")
_table_user.email.label = T("E-mail")
_table_user.email.comment = SPAN("*", _class="req")
#_table_user.password.label = T("Password")
#_table_user.language.label = T("Language")
_table_user.language.default = "en"
_table_user.language.comment = DIV(_class="tooltip", _title=T("Language") + "|" + T("The language to use for notifications."))
_table_user.language.represent = lambda opt: s3_languages.get(opt, UNKNOWN_OPT)
def index():
""" Main Home Page """
div_sit = DIV(H3(T("SITUATION")),
A(DIV(T("Incidents"),
_class = "menu_box"
),
_href = URL( r=request, c="irs", f= "ireport")
),
A(DIV(T("Basic Assess."),
_class = "menu_box"
),
_href = URL( r=request, c="assess", f= "basic_assess")
),
A(DIV(T("Inventories"),
_class = "menu_box"
),
_href = URL( r=request, c="inventory", f= "store")
),
_class = "menu_div"
)
div_arrow_1 = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % request.application),
_class = "div_arrow")
div_dec = DIV(H3(T("DECISION")),
A(DIV(T("Gap Report"),
_class = "menu_box"
),
_href = URL( r=request, c="project", f= "gap_report")
),
A(DIV(T("Gap Map"),
_class = "menu_box"
),
_href = URL( r=request, c="project", f= "gap_map")
),
A(DIV(T("Map"),
_class = "menu_box"
),
_href = URL( r=request, c="gis", f= "index")
),
_class = "menu_div"
)
div_arrow_2 = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % request.application),
_class = "div_arrow")
div_res = DIV(H3(T("RESPONSE")),
A(DIV(T("Activities"),
_class = "menu_box"
),
_href = URL( r=request, c="project", f= "activity")
),
A(DIV(T("Requests"),
_class = "menu_box"
),
_href = URL( r=request, c="rms", f= "req")
),
#A(DIV(T("Distribution"),
# _class = "menu_box"
# ),
# _href = URL( r=request, c="logs", f= "distrib")
# ),
_class = "menu_div",
_id = "menu_div_response"
)
div_additional = DIV(A(DIV(T("Mobile Assess."),
_class = "menu_box"
),
_href = URL( r=request, c="assess", f= "mobile_basic_assess")
))
modules = deployment_settings.modules
module_name = modules[module].name_nice
settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first()
if settings:
admin_name = settings.admin_name
admin_email = settings.admin_email
admin_tel = settings.admin_tel
else:
# db empty and prepopulate is false
admin_name = T("Sahana Administrator"),
admin_email = "support@Not Set",
admin_tel = T("Not Set"),
self_registration = deployment_settings.get_security_self_registration()
response.title = T("Sahana FOSS Disaster Management System")
login_form = None
register_form = None
if not auth.is_logged_in():
# Provide a login box on front page
request.args = ["login"]
login_form = auth()
        # Prepare the registration box for the front page, ready to unhide client-side without another server call
if self_registration:
request.args = ["register"]
register_form = auth()
return dict( div_sit = div_sit,
div_arrow_1 = div_arrow_1,
div_dec = div_dec,
div_arrow_2 = div_arrow_2,
div_res = div_res,
div_additional = div_additional,
module_name=module_name, modules=modules, admin_name=admin_name, admin_email=admin_email, admin_tel=admin_tel, self_registration=self_registration, login_form=login_form, register_form=register_form)
def user():
"Auth functions based on arg. See gluon/tools.py"
auth.settings.on_failed_authorization = URL(r=request, f="error")
if request.args and request.args(0) == "login_next":
# Can redirect the user to another page on first login for workflow (set in 00_settings.py)
# Note the timestamp of last login through the browser
if auth.is_logged_in():
db(db.auth_user.id == auth.user.id).update(timestmp = request.utcnow)
_table_user = auth.settings.table_user
if request.args and request.args(0) == "profile":
#_table_user.organisation.writable = False
_table_user.utc_offset.readable = True
_table_user.utc_offset.writable = True
form = auth()
if request.args and request.args(0) == "login":
login_form = form
else:
login_form = None
if request.args and request.args(0) == "register":
register_form = form
else:
register_form = None
if request.args and request.args(0) == "profile" and deployment_settings.get_auth_openid():
form = DIV(form, openid_login_form.list_user_openids())
self_registration = deployment_settings.get_security_self_registration()
# Use Custom Ext views
# Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
#if request.args(0) == "login":
# response.title = T("Login")
# response.view = "auth/login.html"
return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
def source():
""" RESTful CRUD controller """
return s3_rest_controller("s3", "source")
# NB These 4 functions are unlikely to get used in production
def header():
"Custom view designed to be pulled into an Ext layout's North Panel"
return dict()
def footer():
"Custom view designed to be pulled into an Ext layout's South Panel"
return dict()
def menu():
"Custom view designed to be pulled into the 1st item of an Ext layout's Center Panel"
return dict()
def list():
"Custom view designed to be pulled into an Ext layout's Center Panel"
return dict()
# About Sahana
def apath(path=""):
"Application path"
import os
from gluon.fileutils import up
opath = up(request.folder)
#TODO: This path manipulation is very OS specific.
while path[:3] == "../": opath, path=up(opath), path[3:]
return os.path.join(opath,path).replace("\\", "/")
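# For example, apath("../VERSION") climbs from the applications folder up to
# the web2py root and returns "<web2py root>/VERSION"; the concrete path shown
# here is illustrative only.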
def about():
"""
The About page provides details on the software
        dependencies and versions available to this instance
of Sahana Eden.
"""
    import os
    import sys
    import subprocess
    import string
python_version = sys.version
web2py_version = open(apath("../VERSION"), "r").read()[8:]
sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
try:
sqlite_version = (subprocess.Popen(["sqlite3", "-version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()
except:
sqlite_version = T("Not installed or incorrectly configured.")
try:
mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
except:
mysql_version = T("Not installed or incorrectly configured.")
try:
pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
pgsql_version = string.split(pgsql_reply)[2]
except:
pgsql_version = T("Not installed or incorrectly configured.")
try:
import MySQLdb
pymysql_version = MySQLdb.__revision__
except:
pymysql_version = T("Not installed or incorrectly configured.")
try:
import reportlab
reportlab_version = reportlab.Version
except:
reportlab_version = T("Not installed or incorrectly configured.")
try:
import xlwt
xlwt_version = xlwt.__VERSION__
except:
xlwt_version = T("Not installed or incorrectly configured.")
return dict(
python_version=python_version,
sahana_version=sahana_version,
web2py_version=web2py_version,
sqlite_version=sqlite_version,
mysql_version=mysql_version,
pgsql_version=pgsql_version,
pymysql_version=pymysql_version,
reportlab_version=reportlab_version,
xlwt_version=xlwt_version
)
def help():
"Custom View"
response.title = T("Help")
return dict()
def contact():
"Custom View"
response.title = T("Contact us")
return dict()
|
|
"""
TOC directive
~~~~~~~~~~~~~
The TOC directive syntax looks like::
.. toc:: Title
:depth: 3
"Title" and "depth" option can be empty. "depth" is an integer less
than 6, which defines the max heading level writers want to include
in TOC.
"""
from .base import Directive
class DirectiveToc(Directive):
def __init__(self, depth=3):
self.depth = depth
def parse(self, block, m, state):
title = m.group('value')
depth = None
options = self.parse_options(m)
if options:
depth = dict(options).get('depth')
if depth:
try:
depth = int(depth)
except (ValueError, TypeError):
return {
'type': 'block_error',
'raw': 'TOC depth MUST be integer',
}
return {'type': 'toc', 'raw': None, 'params': (title, depth)}
def reset_toc_state(self, md, s, state):
state['toc_depth'] = self.depth
state['toc_headings'] = []
return s, state
def register_plugin(self, md):
md.block.tokenize_heading = record_toc_heading
md.before_parse_hooks.append(self.reset_toc_state)
md.before_render_hooks.append(md_toc_hook)
if md.renderer.NAME == 'html':
md.renderer.register('theading', render_html_theading)
elif md.renderer.NAME == 'ast':
md.renderer.register('theading', render_ast_theading)
def __call__(self, md):
self.register_directive(md, 'toc')
self.register_plugin(md)
if md.renderer.NAME == 'html':
md.renderer.register('toc', render_html_toc)
elif md.renderer.NAME == 'ast':
md.renderer.register('toc', render_ast_toc)
def record_toc_heading(text, level, state):
# we will use this method to replace tokenize_heading
tid = 'toc_' + str(len(state['toc_headings']) + 1)
state['toc_headings'].append((tid, text, level))
return {'type': 'theading', 'text': text, 'params': (level, tid)}
def md_toc_hook(md, tokens, state):
headings = state.get('toc_headings')
if not headings:
return tokens
# add TOC items into the given location
default_depth = state.get('toc_depth', 3)
headings = list(_cleanup_headings_text(md.inline, headings, state))
for tok in tokens:
if tok['type'] == 'toc':
params = tok['params']
depth = params[1] or default_depth
items = [d for d in headings if d[2] <= depth]
tok['raw'] = items
return tokens
def render_ast_toc(items, title, depth):
return {
'type': 'toc',
'items': [list(d) for d in items],
'title': title,
'depth': depth,
}
def render_ast_theading(children, level, tid):
return {
'type': 'heading', 'children': children,
'level': level, 'id': tid,
}
def render_html_toc(items, title, depth):
html = '<section class="toc">\n'
if title:
html += '<h1>' + title + '</h1>\n'
return html + render_toc_ul(items) + '</section>\n'
def render_html_theading(text, level, tid):
tag = 'h' + str(level)
return '<' + tag + ' id="' + tid + '">' + text + '</' + tag + '>\n'
def extract_toc_items(md, s):
"""Extract TOC headings into list structure of::
[
('toc_1', 'Introduction', 1),
('toc_2', 'Install', 2),
('toc_3', 'Upgrade', 2),
('toc_4', 'License', 1),
]
:param md: Markdown Instance with TOC plugin.
:param s: text string.
"""
s, state = md.before_parse(s, {})
md.block.parse(s, state)
headings = state.get('toc_headings')
if not headings:
return []
return list(_cleanup_headings_text(md.inline, headings, state))
def render_toc_ul(toc):
"""Render a <ul> table of content HTML. The param "toc" should
be formatted into this structure::
[
(toc_id, text, level),
]
For example::
[
('toc-intro', 'Introduction', 1),
('toc-install', 'Install', 2),
('toc-upgrade', 'Upgrade', 2),
('toc-license', 'License', 1),
]
"""
if not toc:
return ''
s = '<ul>\n'
levels = []
for k, text, level in toc:
item = '<a href="#{}">{}</a>'.format(k, text)
if not levels:
s += '<li>' + item
levels.append(level)
elif level == levels[-1]:
s += '</li>\n<li>' + item
elif level > levels[-1]:
s += '\n<ul>\n<li>' + item
levels.append(level)
else:
last_level = levels.pop()
while levels:
last_level = levels.pop()
if level == last_level:
s += '</li>\n</ul>\n</li>\n<li>' + item
levels.append(level)
break
elif level > last_level:
s += '</li>\n<li>' + item
levels.append(last_level)
levels.append(level)
break
else:
s += '</li>\n</ul>\n'
else:
levels.append(level)
s += '</li>\n<li>' + item
while len(levels) > 1:
s += '</li>\n</ul>\n'
levels.pop()
return s + '</li>\n</ul>\n'
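# For the two-item input
#     [('toc-intro', 'Introduction', 1), ('toc-install', 'Install', 2)]
# render_toc_ul returns the nested list:
#     <ul>
#     <li><a href="#toc-intro">Introduction</a>
#     <ul>
#     <li><a href="#toc-install">Install</a></li>
#     </ul>
#     </li>
#     </ul>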
def _cleanup_headings_text(inline, items, state):
for item in items:
text = item[1]
tokens = inline._scan(text, state, inline.rules)
text = ''.join(_inline_token_text(tok) for tok in tokens)
yield item[0], text, item[2]
def _inline_token_text(token):
tok_type = token[0]
if tok_type == 'inline_html':
return ''
if len(token) == 2:
return token[1]
if tok_type in {'image', 'link'}:
return token[2]
return ''
|
|
import json
import os
import sys
from collections import namedtuple
from datetime import datetime
from config_util import parse_args, parse_contexts, generate_file_path
from train import do_training
import mxnet as mx
from stt_io_iter import STTIter
from label_util import LabelUtil
from log_util import LogUtil
import numpy as np
from stt_datagenerator import DataGenerator
from stt_metric import STTMetric
from stt_bi_graphemes_util import generate_bi_graphemes_dictionary
from stt_bucketing_module import STTBucketingModule
from stt_io_bucketingiter import BucketSTTIter
sys.path.insert(0, "../../python")
# os.environ['MXNET_ENGINE_TYPE'] = "NaiveEngine"
os.environ['MXNET_ENGINE_TYPE'] = "ThreadedEnginePerDevice"
os.environ['MXNET_ENABLE_GPU_P2P'] = "0"
class WHCS:
width = 0
height = 0
channel = 0
stride = 0
class ConfigLogger(object):
def __init__(self, log):
self.__log = log
def __call__(self, config):
self.__log.info("Config:")
config.write(self)
def write(self, data):
# stripping the data makes the output nicer and avoids empty lines
line = data.strip()
self.__log.info(line)
def load_labelutil(labelUtil, is_bi_graphemes, language="en"):
if language == "en":
if is_bi_graphemes:
try:
labelUtil.load_unicode_set("resources/unicodemap_en_baidu_bi_graphemes.csv")
except:
raise Exception("There is no resources/unicodemap_en_baidu_bi_graphemes.csv." +
" Please set overwrite_bi_graphemes_dictionary True at train section")
else:
labelUtil.load_unicode_set("resources/unicodemap_en_baidu.csv")
else:
raise Exception("Error: Language Type: %s" % language)
def load_data(args):
mode = args.config.get('common', 'mode')
if mode not in ['train', 'predict', 'load']:
        raise Exception('mode must be one of the following: train, predict, load')
batch_size = args.config.getint('common', 'batch_size')
whcs = WHCS()
whcs.width = args.config.getint('data', 'width')
whcs.height = args.config.getint('data', 'height')
whcs.channel = args.config.getint('data', 'channel')
whcs.stride = args.config.getint('data', 'stride')
save_dir = 'checkpoints'
model_name = args.config.get('common', 'prefix')
is_bi_graphemes = args.config.getboolean('common', 'is_bi_graphemes')
overwrite_meta_files = args.config.getboolean('train', 'overwrite_meta_files')
overwrite_bi_graphemes_dictionary = args.config.getboolean('train', 'overwrite_bi_graphemes_dictionary')
max_duration = args.config.getfloat('data', 'max_duration')
language = args.config.get('data', 'language')
log = LogUtil().getlogger()
labelUtil = LabelUtil.getInstance()
if mode == "train" or mode == "load":
data_json = args.config.get('data', 'train_json')
val_json = args.config.get('data', 'val_json')
datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
datagen.load_train_data(data_json, max_duration=max_duration)
datagen.load_validation_data(val_json, max_duration=max_duration)
if is_bi_graphemes:
if not os.path.isfile("resources/unicodemap_en_baidu_bi_graphemes.csv") or overwrite_bi_graphemes_dictionary:
load_labelutil(labelUtil=labelUtil, is_bi_graphemes=False, language=language)
generate_bi_graphemes_dictionary(datagen.train_texts+datagen.val_texts)
load_labelutil(labelUtil=labelUtil, is_bi_graphemes=is_bi_graphemes, language=language)
args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
if mode == "train":
if overwrite_meta_files:
log.info("Generate mean and std from samples")
normalize_target_k = args.config.getint('train', 'normalize_target_k')
datagen.sample_normalize(normalize_target_k, True)
else:
log.info("Read mean and std from meta files")
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
elif mode == "load":
# get feat_mean and feat_std to normalize dataset
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
elif mode == 'predict':
test_json = args.config.get('data', 'test_json')
datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
datagen.load_train_data(test_json, max_duration=max_duration)
labelutil = load_labelutil(labelUtil, is_bi_graphemes, language="en")
args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
datagen.get_meta_from_file(
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
if batch_size == 1 and is_batchnorm and (mode == 'train' or mode == 'load'):
raise Warning('batch size 1 is too small for is_batchnorm')
    # sort file paths by their duration in ascending order to implement sortaGrad
if mode == "train" or mode == "load":
max_t_count = datagen.get_max_seq_length(partition="train")
max_label_length = \
datagen.get_max_label_length(partition="train", is_bi_graphemes=is_bi_graphemes)
elif mode == "predict":
max_t_count = datagen.get_max_seq_length(partition="test")
max_label_length = \
datagen.get_max_label_length(partition="test", is_bi_graphemes=is_bi_graphemes)
args.config.set('arch', 'max_t_count', str(max_t_count))
args.config.set('arch', 'max_label_length', str(max_label_length))
from importlib import import_module
prepare_data_template = import_module(args.config.get('arch', 'arch_file'))
init_states = prepare_data_template.prepare_data(args)
sort_by_duration = (mode == "train")
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
save_feature_as_csvfile = args.config.getboolean('train', 'save_feature_as_csvfile')
if is_bucketing:
buckets = json.loads(args.config.get('arch', 'buckets'))
data_loaded = BucketSTTIter(partition="train",
count=datagen.count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=sort_by_duration,
is_bi_graphemes=is_bi_graphemes,
buckets=buckets,
save_feature_as_csvfile=save_feature_as_csvfile)
else:
data_loaded = STTIter(partition="train",
count=datagen.count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=sort_by_duration,
is_bi_graphemes=is_bi_graphemes,
save_feature_as_csvfile=save_feature_as_csvfile)
if mode == 'train' or mode == 'load':
if is_bucketing:
validation_loaded = BucketSTTIter(partition="validation",
count=datagen.val_count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=False,
is_bi_graphemes=is_bi_graphemes,
buckets=buckets,
save_feature_as_csvfile=save_feature_as_csvfile)
else:
validation_loaded = STTIter(partition="validation",
count=datagen.val_count,
datagen=datagen,
batch_size=batch_size,
num_label=max_label_length,
init_states=init_states,
seq_length=max_t_count,
width=whcs.width,
height=whcs.height,
sort_by_duration=False,
is_bi_graphemes=is_bi_graphemes,
save_feature_as_csvfile=save_feature_as_csvfile)
return data_loaded, validation_loaded, args
elif mode == 'predict':
return data_loaded, args
def load_model(args, contexts, data_train):
# load the model identified by the model_name prefix and the epoch given by model_num_epoch, using the given gpu contexts
mode = args.config.get('common', 'mode')
load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
is_start_from_batch = args.config.getboolean('load', 'is_start_from_batch')
from importlib import import_module
symbol_template = import_module(args.config.get('arch', 'arch_file'))
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
if mode == 'train':
if is_bucketing:
bucketing_arch = symbol_template.BucketingArch(args)
model_loaded = bucketing_arch.get_sym_gen()
else:
model_loaded = symbol_template.arch(args)
model_num_epoch = None
elif mode == 'load' or mode == 'predict':
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
if is_bucketing:
bucketing_arch = symbol_template.BucketingArch(args)
model_loaded = bucketing_arch.get_sym_gen()
else:
model_path = 'checkpoints/' + str(model_name[:-5])
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
model_loaded = mx.module.Module.load(
prefix=model_path, epoch=model_num_epoch, context=contexts,
data_names=data_names, label_names=label_names,
load_optimizer_states=load_optimizer_states)
if is_start_from_batch:
import re
model_num_epoch = int(re.findall(r'\d+', model_file)[0])
return model_loaded, model_num_epoch
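# Hedged sketch (illustrative helper, not called by this script): how the
# checkpoint file name is split into the (prefix, epoch) pair used above. An
# MXNet checkpoint prefix such as 'checkpoints/model' pairs with
# 'checkpoints/model-symbol.json' and 'checkpoints/model-0003.params' for
# epoch 3, which is why load_model() takes the last four characters of the
# stem as the epoch and drops the trailing '-NNNN' to rebuild the prefix.
def _split_checkpoint_name(model_file):
    # e.g. 'model-0003.params' -> ('checkpoints/model', 3)
    model_name = os.path.splitext(model_file)[0]
    return 'checkpoints/' + model_name[:-5], int(model_name[-4:])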
if __name__ == '__main__':
if len(sys.argv) <= 1:
raise Exception('A cfg file path must be provided, ' +
'e.g. python main.py --configfile examplecfg.cfg')
args = parse_args(sys.argv[1])
# set parameters from cfg file
# give random seed
random_seed = args.config.getint('common', 'random_seed')
mx_random_seed = args.config.getint('common', 'mx_random_seed')
# random seed for shuffling data list
if random_seed != -1:
np.random.seed(random_seed)
# set mx.random.seed to give seed for parameter initialization
if mx_random_seed != -1:
mx.random.seed(mx_random_seed)
else:
mx.random.seed(hash(datetime.now()))
# set log file name
log_filename = args.config.get('common', 'log_filename')
log = LogUtil(filename=log_filename).getlogger()
# set parameters from the common section of the cfg file
mode = args.config.get('common', 'mode')
if mode not in ['train', 'predict', 'load']:
raise Exception(
'Define mode in the cfg file first. ' +
'train, predict, or load are the valid candidates for mode.')
# get meta file where character to number conversions are defined
contexts = parse_contexts(args)
num_gpu = len(contexts)
batch_size = args.config.getint('common', 'batch_size')
# check that the number of gpus is a positive divisor of the batch size for data parallelism
if batch_size % num_gpu != 0:
raise Exception('num_gpu should be a positive divisor of batch_size')
if mode == "train" or mode == "load":
data_train, data_val, args = load_data(args)
elif mode == "predict":
data_train, args = load_data(args)
is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
# log current config
config_logger = ConfigLogger(log)
config_logger(args.config)
# load model
model_loaded, model_num_epoch = load_model(args, contexts, data_train)
# if mode is 'train', it trains the model
if mode == 'train':
if is_bucketing:
module = STTBucketingModule(
sym_gen=model_loaded,
default_bucket_key=data_train.default_bucket_key,
context=contexts
)
else:
data_names = [x[0] for x in data_train.provide_data]
label_names = [x[0] for x in data_train.provide_label]
module = mx.mod.Module(model_loaded, context=contexts,
data_names=data_names, label_names=label_names)
do_training(args=args, module=module, data_train=data_train, data_val=data_val)
# if mode is 'load', it loads the model from the checkpoint and continues training
elif mode == 'load':
do_training(args=args, module=model_loaded, data_train=data_train, data_val=data_val,
begin_epoch=model_num_epoch + 1)
# if mode is 'predict', it predicts labels for the input using the loaded model
elif mode == 'predict':
# predict through data
if is_bucketing:
max_t_count = args.config.getint('arch', 'max_t_count')
load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
model_path = 'checkpoints/' + str(model_name[:-5])
model = STTBucketingModule(
sym_gen=model_loaded,
default_bucket_key=data_train.default_bucket_key,
context=contexts
)
model.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
_, arg_params, aux_params = mx.model.load_checkpoint(model_path, model_num_epoch)
model.set_params(arg_params, aux_params)
model_loaded = model
else:
model_loaded.bind(for_training=False, data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label)
max_t_count = args.config.getint('arch', 'max_t_count')
eval_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu)
if is_batchnorm:
for nbatch, data_batch in enumerate(data_train):
model_loaded.forward(data_batch, is_train=False)
model_loaded.update_metric(eval_metric, data_batch.label)
else:
#model_loaded.score(eval_data=data_train, num_batch=None,
# eval_metric=eval_metric, reset=True)
for nbatch, data_batch in enumerate(data_train):
model_loaded.forward(data_batch, is_train=False)
model_loaded.update_metric(eval_metric, data_batch.label)
else:
raise Exception(
'Define mode in the cfg file first. ' +
'train, predict, or load are the valid candidates for mode.')
|
|
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
import os
import os_vif
from os_vif import exception as osv_exception
from oslo_concurrency import processutils
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova.network import os_vif_util
from nova import objects
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt import osinfo
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# vhostuser queues support
MIN_LIBVIRT_VHOSTUSER_MQ = (1, 2, 17)
def is_vif_model_valid_for_virt(virt_type, vif_model):
valid_models = {
'qemu': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'kvm': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'xen': [network_model.VIF_MODEL_NETFRONT,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
'lxc': [],
'uml': [],
}
if vif_model is None:
return True
if virt_type not in valid_models:
raise exception.UnsupportedVirtType(virt=virt_type)
return vif_model in valid_models[virt_type]
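# Hedged usage sketch (illustrative only, not used elsewhere in this module):
# the helper above returns True when no model was requested, checks the
# requested model against the per-hypervisor whitelist otherwise, and raises
# UnsupportedVirtType for an unknown virt_type.
def _example_vif_model_checks():
    assert is_vif_model_valid_for_virt('kvm', None)
    assert is_vif_model_valid_for_virt('kvm', network_model.VIF_MODEL_VIRTIO)
    assert not is_vif_model_valid_for_virt('lxc', network_model.VIF_MODEL_E1000)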
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
def get_base_config(self, instance, mac, image_meta,
inst_type, virt_type):
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
vhost_queues = None
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
model = osinfo.HardwareProperties(image_meta).network_model
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
if (model is None and
virt_type in ('kvm', 'qemu') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
virt_type == "qemu"):
driver = "qemu"
if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
virt=virt_type)
if (virt_type == 'kvm' and
model == network_model.VIF_MODEL_VIRTIO):
vhost_drv, vhost_queues = self._get_virtio_mq_settings(image_meta,
inst_type)
driver = vhost_drv or driver
designer.set_vif_guest_frontend_config(
conf, mac, model, driver, vhost_queues)
return conf
def get_base_hostdev_pci_config(self, vif):
conf = vconfig.LibvirtConfigGuestHostdevPCI()
pci_slot = vif['profile']['pci_slot']
designer.set_vif_host_backend_hostdev_pci_config(conf, pci_slot)
return conf
def _get_virtio_mq_settings(self, image_meta, flavor):
"""A methods to set the number of virtio queues,
if it has been requested in extra specs.
"""
driver = None
vhost_queues = None
if not isinstance(image_meta, objects.ImageMeta):
image_meta = objects.ImageMeta.from_dict(image_meta)
img_props = image_meta.properties
if img_props.get('hw_vif_multiqueue_enabled'):
driver = 'vhost'
max_tap_queues = self._get_max_tap_queues()
if max_tap_queues:
vhost_queues = (max_tap_queues if flavor.vcpus > max_tap_queues
else flavor.vcpus)
else:
vhost_queues = flavor.vcpus
return (driver, vhost_queues)
def _get_max_tap_queues(self):
# NOTE(kengo.sakai): In kernels prior to 3.0,
# multiple queues on a tap interface are not supported.
# In kernels 3.x, the number of queues on a tap interface
# is limited to 8. From 4.0, the number is 256.
# See: https://bugs.launchpad.net/nova/+bug/1570631
kernel_version = int(os.uname()[2].split(".")[0])
if kernel_version <= 2:
return 1
elif kernel_version == 3:
return 8
elif kernel_version == 4:
return 256
else:
return None
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
@staticmethod
def is_no_op_firewall():
return CONF.firewall_driver == "nova.virt.firewall.NoopFirewallDriver"
def get_firewall_required(self, vif):
if vif.is_neutron_filtering_enabled():
return False
if self.is_no_op_firewall():
return False
return True
def get_firewall_required_os_vif(self, vif):
if vif.has_traffic_filtering:
return False
if self.is_no_op_firewall():
return False
return True
def get_config_bridge(self, instance, vif, image_meta,
inst_type, virt_type, host):
"""Get VIF configurations for bridge type."""
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(vif),
self.get_vif_devname(vif))
mac_id = vif['address'].replace(':', '')
name = "nova-instance-" + instance.name + "-" + mac_id
if self.get_firewall_required(vif):
conf.filtername = name
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ivs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type, host):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
inst_type,
virt_type,
host)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance,
vif['address'],
image_meta,
inst_type,
virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_ivs(self, instance, vif, image_meta,
inst_type, virt_type, host):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type,
host)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
inst_type,
virt_type,
host)
def get_config_802qbg(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
conf, vif['network'].get_meta('interface'),
params['managerid'],
params['typeid'],
params['typeidversion'],
params['instanceid'])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_802qbh(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_802qbh_config(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_PROFILEID])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hw_veb(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_hw_veb(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_VLAN])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hostdev_physical(self, instance, vif, image_meta,
inst_type, virt_type, host):
return self.get_base_hostdev_pci_config(vif)
def get_config_macvtap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
vif_details = vif['details']
macvtap_src = vif_details.get(network_model.VIF_DETAILS_MACVTAP_SOURCE)
macvtap_mode = vif_details.get(network_model.VIF_DETAILS_MACVTAP_MODE)
phys_interface = vif_details.get(
network_model.VIF_DETAILS_PHYS_INTERFACE)
missing_params = []
if macvtap_src is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_SOURCE)
if macvtap_mode is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_MODE)
if phys_interface is None:
missing_params.append(network_model.VIF_DETAILS_PHYS_INTERFACE)
if len(missing_params) > 0:
raise exception.VifDetailsMissingMacvtapParameters(
vif_id=vif['id'],
missing_params=missing_params)
designer.set_vif_host_backend_direct_config(
conf, macvtap_src, macvtap_mode)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_tap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def _get_vhostuser_settings(self, vif):
vif_details = vif['details']
mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
'server')
sock_path = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_SOCKET)
if sock_path is None:
raise exception.VifDetailsMissingVhostuserSockPath(
vif_id=vif['id'])
return mode, sock_path
def get_config_vhostuser(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
mode, sock_path = self._get_vhostuser_settings(vif)
designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
# (vladikr) Not setting up driver and queues for vhostuser
# as queues are not supported in Libvirt until version 1.2.17
if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
LOG.debug('Multiqueue is not supported by vhostuser with this libvirt version.')
conf.driver_name = None
conf.vhost_queues = None
return conf
def get_config_ib_hostdev(self, instance, vif, image_meta,
inst_type, virt_type, host):
return self.get_base_hostdev_pci_config(vif)
def get_config_vrouter(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def _set_config_VIFBridge(self, instance, vif, conf):
conf.net_type = "bridge"
conf.source_dev = vif.bridge_name
conf.target_dev = vif.vif_name
if self.get_firewall_required_os_vif(vif):
mac_id = vif.address.replace(':', '')
name = "nova-instance-" + instance.name + "-" + mac_id
conf.filtername = name
def _set_config_VIFOpenVSwitch(self, instance, vif, conf):
conf.net_type = "bridge"
conf.source_dev = vif.bridge_name
conf.target_dev = vif.vif_name
self._set_config_VIFPortProfile(instance, vif, conf)
def _set_config_VIFPortProfileOpenVSwitch(self, profile, conf):
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid",
profile.interface_id)
def _set_config_VIFPortProfile(self, instance, vif, conf):
# Set any port profile that may be required
profilefunc = "_set_config_" + vif.port_profile.obj_name()
func = getattr(self, profilefunc, None)
if not func:
raise exception.NovaException(
"Unsupported VIF port profile type %(obj)s func %(func)s" %
{'obj': vif.port_profile.obj_name(), 'func': profilefunc})
func(vif.port_profile, conf)
def _get_config_os_vif(self, instance, vif, image_meta, inst_type,
virt_type, host):
"""Get the domain config for a VIF
:param instance: nova.objects.Instance
:param vif: os_vif.objects.vif.VIFBase subclass
:param image_meta: nova.objects.ImageMeta
:param inst_type: nova.objects.Flavor
:param virt_type: virtualization type
:param host: nova.virt.libvirt.host.Host
:returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface
"""
# Do the config that's common to all vif types
conf = self.get_base_config(instance, vif.address, image_meta,
inst_type, virt_type)
# Do the VIF type specific config
viffunc = "_set_config_" + vif.obj_name()
func = getattr(self, viffunc, None)
if not func:
raise exception.NovaException(
"Unsupported VIF type %(obj)s func %(func)s" %
{'obj': vif.obj_name(), 'func': viffunc})
func(instance, vif, conf)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type, host):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s virt_type=%(virt_type)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
return self._get_config_os_vif(instance, vif_obj, image_meta,
inst_type, virt_type, host)
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type, host)
def _plug_bridge_with_port(self, instance, vif, port):
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
disv6 = '/proc/sys/net/ipv6/conf/%s/disable_ipv6' % br_name
if os.path.exists(disv6):
utils.execute('tee',
disv6,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
mtu = vif['network'].get_meta('mtu')
linux_net._create_veth_pair(v1_name, v2_name, mtu)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
if port == 'ovs':
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id,
vif['address'], instance.uuid,
mtu)
elif port == 'ivs':
linux_net.create_ivs_vif_port(v2_name, iface_id,
vif['address'], instance.uuid)
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
self._plug_bridge_with_port(instance, vif, port='ovs')
def plug_ivs_ethernet(self, instance, vif):
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance.uuid)
def plug_ivs_hybrid(self, instance, vif):
"""Plug using hybrid strategy (same as OVS)
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal IVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
self._plug_bridge_with_port(instance, vif, port='ivs')
def plug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ivs_hybrid(instance, vif)
else:
self.plug_ivs_ethernet(instance, vif)
def plug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
pci_slot = vif['profile']['pci_slot']
device_id = instance['uuid']
vnic_mac = vif['address']
try:
utils.execute('ebrctl', 'add-port', vnic_mac, device_id,
fabric, network_model.VIF_TYPE_IB_HOSTDEV,
pci_slot, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while plugging ib hostdev vif"),
instance=instance
)
def plug_802qbg(self, instance, vif):
pass
def plug_802qbh(self, instance, vif):
pass
def plug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
linux_net.set_vf_interface_vlan(
vif['profile']['pci_slot'],
mac_addr=vif['address'],
vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_hostdev_physical(self, instance, vif):
pass
def plug_macvtap(self, instance, vif):
vif_details = vif['details']
vlan = vif_details.get(network_model.VIF_DETAILS_VLAN)
if vlan:
vlan_name = vif_details.get(
network_model.VIF_DETAILS_MACVTAP_SOURCE)
phys_if = vif_details.get(network_model.VIF_DETAILS_PHYS_INTERFACE)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(
vlan, phys_if, interface=vlan_name)
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
linux_net.create_tap_dev(dev)
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
Connect a network device to its respective
Virtual Domain in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance.project_id
try:
utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
'access_vm', iface_id, vif['address'],
'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
mac = vif['details'].get(network_model.VIF_DETAILS_TAP_MAC_ADDRESS)
linux_net.create_tap_dev(dev, mac)
network = vif.get('network')
mtu = network.get_meta('mtu') if network else None
linux_net._set_device_mtu(dev, mtu)
def plug_vhostuser_fp(self, instance, vif):
"""Create a fp netdevice interface with a vhostuser socket"""
dev = self.get_vif_devname(vif)
if linux_net.device_exists(dev):
return
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
sockmode_qemu, sockpath = self._get_vhostuser_settings(vif)
sockmode_port = 'client' if sockmode_qemu == 'server' else 'server'
try:
linux_net.create_fp_dev(dev, sockpath, sockmode_port)
if ovs_plug:
if vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
utils.execute('brctl', 'addif',
self.get_br_name(vif['id']),
dev, run_as_root=True)
else:
iface_id = self.get_ovs_interfaceid(vif)
mtu = vif['network'].get_meta('mtu')
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
dev, iface_id,
vif['address'],
instance.uuid, mtu)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_vhostuser_ovs(self, instance, vif):
"""Plug a VIF_TYPE_VHOSTUSER into an ovs bridge"""
iface_id = self.get_ovs_interfaceid(vif)
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
mtu = vif['network'].get_meta('mtu')
linux_net.create_ovs_vif_port(
self.get_bridge_name(vif),
port_name, iface_id, vif['address'],
instance.uuid, mtu,
interface_type=network_model.OVS_VHOSTUSER_INTERFACE_TYPE)
def plug_vhostuser(self, instance, vif):
fp_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
False)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if fp_plug:
self.plug_vhostuser_fp(instance, vif)
elif ovs_plug:
self.plug_vhostuser_ovs(instance, vif)
def plug_vrouter(self, instance, vif):
"""Plug into Contrail's network port
Bind the vif to a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
ip_addr = '0.0.0.0'
ip6_addr = None
subnets = vif['network']['subnets']
for subnet in subnets:
if not subnet['ips']:
continue
ips = subnet['ips'][0]
if not ips['address']:
continue
if (ips['version'] == 4):
if ips['address'] is not None:
ip_addr = ips['address']
if (ips['version'] == 6):
if ips['address'] is not None:
ip6_addr = ips['address']
ptype = 'NovaVMPort'
if (CONF.libvirt.virt_type == 'lxc'):
ptype = 'NameSpacePort'
cmd_args = ("--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s "
"--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s"
" --vm_name=%s --mac=%s --tap_name=%s --port_type=%s "
"--tx_vlan_id=%d --rx_vlan_id=%d" % (vif['id'],
instance.uuid, vif['network']['id'],
instance.project_id, ip_addr, ip6_addr,
instance.display_name, vif['address'],
vif['devname'], ptype, -1, -1))
try:
linux_net.create_tap_dev(dev)
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def _plug_os_vif(self, instance, vif, raw_vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
try:
os_vif.plug(vif, instance_info)
except osv_exception.ExceptionBase as ex:
msg = (_("Failure running os_vif plugin plug method: %(ex)s")
% {'ex': ex})
raise exception.NovaException(msg)
# TODO(johngarbutt) remove this hack once 1623876 is fixed in os-vif
network = raw_vif.get('network')
mtu = network.get_meta('mtu') if network else None
if mtu is not None:
linux_net._set_device_mtu(network["bridge"], mtu)
if (type(vif) == os_vif.objects.vif.VIFBridge and
hasattr(vif, "port_profile") and
isinstance(vif.port_profile,
os_vif.objects.vif.VIFPortProfileOpenVSwitch)):
veths = [
("qvb%s" % vif.id)[:network_model.NIC_NAME_LEN],
("qvo%s" % vif.id)[:network_model.NIC_NAME_LEN]]
for veth in veths:
linux_net._set_device_mtu(veth, mtu)
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
self._plug_os_vif(instance, vif_obj, vif)
return
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if linux_net.device_exists(br_name):
utils.execute('brctl', 'delif', br_name, v1_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name,
run_as_root=True)
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
Unhook port from IVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
def unplug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
vnic_mac = vif['address']
try:
utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
run_as_root=True)
except Exception:
LOG.exception(_LE("Failed while unplugging ib hostdev vif"))
def unplug_802qbg(self, instance, vif):
pass
def unplug_802qbh(self, instance, vif):
pass
def unplug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
# The ip utility doesn't accept the MAC 00:00:00:00:00:00.
# Therefore, keep the MAC unchanged. Later operations on
# the same VF will not be affected by the existing MAC.
linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
mac_addr=vif['address'])
def unplug_hostdev_physical(self, instance, vif):
pass
def unplug_macvtap(self, instance, vif):
pass
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
utils.execute('mm-ctl', '--unbind-port', port_id,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
try:
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
Delete the network device and its respective
connection to the Virtual Domain in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
try:
utils.execute('ifc_ctl', 'gateway', 'ifdown',
dev, run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_vhostuser_fp(self, instance, vif):
"""Delete a fp netdevice interface with a vhostuser socket"""
dev = self.get_vif_devname(vif)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
try:
if ovs_plug:
if vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
dev, False)
linux_net.delete_fp_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_vhostuser_ovs(self, instance, vif):
"""Unplug a VIF_TYPE_VHOSTUSER into an ovs bridge"""
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
port_name)
def unplug_vhostuser(self, instance, vif):
fp_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
False)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if fp_plug:
self.unplug_vhostuser_fp(instance, vif)
elif ovs_plug:
self.unplug_vhostuser_ovs(instance, vif)
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
cmd_args = ("--oper=delete --uuid=%s" % (vif['id']))
try:
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while unplugging vif"), instance=instance)
def _unplug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
try:
os_vif.unplug(vif, instance_info)
except osv_exception.ExceptionBase as ex:
msg = (_("Failure running os_vif plugin unplug method: %(ex)s")
% {'ex': ex})
raise exception.NovaException(msg)
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
self._unplug_os_vif(instance, vif_obj)
return
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
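# Hedged sketch (illustrative, not part of Nova): the legacy non-os-vif
# codepaths above resolve their handlers purely by naming convention. The vif
# type is normalised first (the '2.1q' -> '2q' replacement turns e.g.
# '802.1qbg' into '802qbg') and then plug_/unplug_/get_config_<slug> is looked
# up with getattr, so a missing handler surfaces as the "unexpected vif_type"
# exceptions raised in plug()/unplug()/get_config().
def _example_legacy_dispatch(driver, vif_type):
    vif_slug = driver._normalize_vif_type(vif_type)
    return (getattr(driver, 'plug_%s' % vif_slug, None),
            getattr(driver, 'unplug_%s' % vif_slug, None),
            getattr(driver, 'get_config_%s' % vif_slug, None))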
|
|
# -*- test-case-name: twisted.test.test_unix,twisted.internet.test.test_unix,twisted.internet.test.test_posixbase -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
UNIX socket support for Twisted.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: Itamar Shtull-Trauring
"""
from __future__ import division, absolute_import
import os
import stat
import socket
import struct
from errno import EINTR, EMSGSIZE, EAGAIN, EWOULDBLOCK, ECONNREFUSED, ENOBUFS
from zope.interface import implementer, implementer_only, implementedBy
if not hasattr(socket, 'AF_UNIX'):
raise ImportError("UNIX sockets not supported on this platform")
from twisted.internet import main, base, tcp, udp, error, interfaces
from twisted.internet import protocol, address
from twisted.python import lockfile, log, reflect, failure
from twisted.python.filepath import _coerceToFilesystemEncoding
from twisted.python.util import untilConcludes
from twisted.python.compat import lazyByteSlice
try:
from twisted.python import sendmsg
except ImportError:
sendmsg = None
def _ancillaryDescriptor(fd):
"""
Pack an integer into an ancillary data structure suitable for use with
L{sendmsg.sendmsg}.
"""
packed = struct.pack("i", fd)
return [(socket.SOL_SOCKET, sendmsg.SCM_RIGHTS, packed)]
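# Hedged sketch (illustrative, not used by the transports below): the ancillary
# entry built above is the (level, type, payload) triple that sendmsg.sendmsg()
# expects, with the descriptor packed as one native C int (typically 4 bytes,
# matching the `// 4` in the receive path). The receiving side unpacks the
# payload in _SendmsgMixin._ancillaryLevelSOLSOCKETTypeSCMRIGHTS.
def _exampleAncillaryLayout(fd=0):
    level, cmsgType, packed = _ancillaryDescriptor(fd)[0]
    assert level == socket.SOL_SOCKET
    assert struct.unpack("i", packed) == (fd,)
    return cmsgType  # sendmsg.SCM_RIGHTS when sendmsg is importable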
@implementer(interfaces.IUNIXTransport)
class _SendmsgMixin(object):
"""
Mixin for stream-oriented UNIX transports which uses sendmsg and recvmsg to
offer additional functionality, such as copying file descriptors into other
processes.
@ivar _writeSomeDataBase: The class which provides the basic implementation
of C{writeSomeData}. Ultimately this should be a subclass of
L{twisted.internet.abstract.FileDescriptor}. Subclasses which mix in
L{_SendmsgMixin} must define this.
@ivar _sendmsgQueue: A C{list} of C{int} holding file descriptors which are
currently buffered before being sent.
@ivar _fileDescriptorBufferSize: An C{int} giving the maximum number of file
descriptors to accept and queue for sending before pausing the
registered producer, if there is one.
"""
_writeSomeDataBase = None
_fileDescriptorBufferSize = 64
def __init__(self):
self._sendmsgQueue = []
def _isSendBufferFull(self):
"""
Determine whether the user-space send buffer for this transport is full
or not.
This extends the base determination by adding consideration of how many
file descriptors need to be sent using L{sendmsg.sendmsg}. When there
are more than C{self._fileDescriptorBufferSize}, the buffer is
considered full.
@return: C{True} if it is full, C{False} otherwise.
"""
# There must be some bytes in the normal send buffer, checked by
# _writeSomeDataBase._isSendBufferFull, in order to send file
# descriptors from _sendmsgQueue. That means that the buffer will
# eventually be considered full even without this additional logic.
# However, since we send only one byte per file descriptor, having lots
# of elements in _sendmsgQueue incurs more overhead and perhaps slows
# things down. Anyway, try this for now, maybe rethink it later.
return (
len(self._sendmsgQueue) > self._fileDescriptorBufferSize
or self._writeSomeDataBase._isSendBufferFull(self))
def sendFileDescriptor(self, fileno):
"""
Queue the given file descriptor to be sent and start trying to send it.
"""
self._sendmsgQueue.append(fileno)
self._maybePauseProducer()
self.startWriting()
def writeSomeData(self, data):
"""
Send as much of C{data} as possible. Also send any pending file
descriptors.
"""
# Make it a programming error to send more file descriptors than you
# send regular bytes. Otherwise, due to the limitation mentioned
# below, we could end up with file descriptors left, but no bytes to
# send with them, therefore no way to send those file descriptors.
if len(self._sendmsgQueue) > len(data):
return error.FileDescriptorOverrun()
# If there are file descriptors to send, try sending them first, using
# a little bit of data from the stream-oriented write buffer too. It
# is not possible to send a file descriptor without sending some
# regular data.
index = 0
try:
while index < len(self._sendmsgQueue):
fd = self._sendmsgQueue[index]
try:
untilConcludes(
sendmsg.sendmsg, self.socket, data[index:index+1],
_ancillaryDescriptor(fd))
except socket.error as se:
if se.args[0] in (EWOULDBLOCK, ENOBUFS):
return index
else:
return main.CONNECTION_LOST
else:
index += 1
finally:
del self._sendmsgQueue[:index]
# Hand the remaining data to the base implementation. Avoid slicing in
# favor of a buffer, in case that happens to be any faster.
limitedData = lazyByteSlice(data, index)
result = self._writeSomeDataBase.writeSomeData(self, limitedData)
try:
return index + result
except TypeError:
return result
def doRead(self):
"""
Calls L{IProtocol.dataReceived} with all available data and
L{IFileDescriptorReceiver.fileDescriptorReceived} once for each
received file descriptor in ancillary data.
This reads up to C{self.bufferSize} bytes of data from its socket, then
dispatches the data to protocol callbacks to be handled. If the
connection is not lost through an error in the underlying recvmsg(),
this function will return the result of the dataReceived call.
"""
try:
data, ancillary, flags = untilConcludes(
sendmsg.recvmsg, self.socket, self.bufferSize)
except socket.error as se:
if se.args[0] == EWOULDBLOCK:
return
else:
return main.CONNECTION_LOST
for cmsgLevel, cmsgType, cmsgData in ancillary:
if (cmsgLevel == socket.SOL_SOCKET and
cmsgType == sendmsg.SCM_RIGHTS):
self._ancillaryLevelSOLSOCKETTypeSCMRIGHTS(cmsgData)
else:
log.msg(
format=(
"%(protocolName)s (on %(hostAddress)r) "
"received unsupported ancillary data "
"(level=%(cmsgLevel)r, type=%(cmsgType)r) "
"from %(peerAddress)r."),
hostAddress=self.getHost(), peerAddress=self.getPeer(),
protocolName=self._getLogPrefix(self.protocol),
cmsgLevel=cmsgLevel, cmsgType=cmsgType,
)
return self._dataReceived(data)
def _ancillaryLevelSOLSOCKETTypeSCMRIGHTS(self, cmsgData):
"""
Processes ancillary data with level SOL_SOCKET and type SCM_RIGHTS,
indicating that the ancillary data payload holds file descriptors.
Calls L{IFileDescriptorReceiver.fileDescriptorReceived} once for each
received file descriptor or logs a message if the protocol does not
implement L{IFileDescriptorReceiver}.
@param cmsgData: Ancillary data payload.
@type cmsgData: L{bytes}
"""
fdCount = len(cmsgData) // 4
fds = struct.unpack('i'*fdCount, cmsgData)
if interfaces.IFileDescriptorReceiver.providedBy(self.protocol):
for fd in fds:
self.protocol.fileDescriptorReceived(fd)
else:
log.msg(
format=(
"%(protocolName)s (on %(hostAddress)r) does not "
"provide IFileDescriptorReceiver; closing file "
"descriptor received (from %(peerAddress)r)."),
hostAddress=self.getHost(), peerAddress=self.getPeer(),
protocolName=self._getLogPrefix(self.protocol),
)
for fd in fds:
os.close(fd)
class _UnsupportedSendmsgMixin(object):
"""
Behaviorless placeholder used when C{twisted.python.sendmsg} is not
available, preventing L{IUNIXTransport} from being supported.
"""
if sendmsg:
_SendmsgMixin = _SendmsgMixin
else:
_SendmsgMixin = _UnsupportedSendmsgMixin
class Server(_SendmsgMixin, tcp.Server):
_writeSomeDataBase = tcp.Server
def __init__(self, sock, protocol, client, server, sessionno, reactor):
_SendmsgMixin.__init__(self)
tcp.Server.__init__(self, sock, protocol, (client, None), server, sessionno, reactor)
def getHost(self):
return address.UNIXAddress(self.socket.getsockname())
def getPeer(self):
return address.UNIXAddress(self.hostname or None)
def _inFilesystemNamespace(path):
"""
Determine whether the given unix socket path is in a filesystem namespace.
While most PF_UNIX sockets are entries in the filesystem, Linux 2.2 and
above support PF_UNIX sockets in an "abstract namespace" that does not
correspond to any path. This function returns C{True} if the given socket
path is stored in the filesystem and C{False} if the path is in this
abstract namespace.
"""
return path[:1] not in (b"\0", u"\0")
class _UNIXPort(object):
def getHost(self):
"""
Returns a UNIXAddress.
This indicates the server's address.
"""
return address.UNIXAddress(self.socket.getsockname())
class Port(_UNIXPort, tcp.Port):
addressFamily = socket.AF_UNIX
socketType = socket.SOCK_STREAM
transport = Server
lockFile = None
def __init__(self, fileName, factory, backlog=50, mode=0o666, reactor=None,
wantPID = 0):
tcp.Port.__init__(self, self._buildAddr(fileName).name, factory,
backlog, reactor=reactor)
self.mode = mode
self.wantPID = wantPID
def __repr__(self):
factoryName = reflect.qual(self.factory.__class__)
if hasattr(self, 'socket'):
return '<%s on %r>' % (
factoryName, _coerceToFilesystemEncoding('', self.port))
else:
return '<%s (not listening)>' % (factoryName,)
def _buildAddr(self, name):
return address.UNIXAddress(name)
def startListening(self):
"""
Create and bind my socket, and begin listening on it.
This is called on unserialization, and must be called after creating a
server to begin listening on the specified port.
"""
log.msg("%s starting on %r" % (
self._getLogPrefix(self.factory),
_coerceToFilesystemEncoding('', self.port)))
if self.wantPID:
self.lockFile = lockfile.FilesystemLock(self.port + b".lock")
if not self.lockFile.lock():
raise error.CannotListenError(None, self.port,
"Cannot acquire lock")
else:
if not self.lockFile.clean:
try:
# This is a best-attempt at cleaning up
# left-over unix sockets on the filesystem.
# If it fails, there's not much else we can
# do. The bind() below will fail with an
# exception that actually propagates.
if stat.S_ISSOCK(os.stat(self.port).st_mode):
os.remove(self.port)
except:
pass
self.factory.doStart()
try:
skt = self.createInternetSocket()
skt.bind(self.port)
except socket.error as le:
raise error.CannotListenError(None, self.port, le)
else:
if _inFilesystemNamespace(self.port):
# Make the socket readable and writable to the world.
os.chmod(self.port, self.mode)
skt.listen(self.backlog)
self.connected = True
self.socket = skt
self.fileno = self.socket.fileno
self.numberAccepts = 100
self.startReading()
def _logConnectionLostMsg(self):
"""
Log message for closing socket
"""
log.msg('(UNIX Port %s Closed)' % (
_coerceToFilesystemEncoding('', self.port,)))
def connectionLost(self, reason):
if _inFilesystemNamespace(self.port):
os.unlink(self.port)
if self.lockFile is not None:
self.lockFile.unlock()
tcp.Port.connectionLost(self, reason)
class Client(_SendmsgMixin, tcp.BaseClient):
"""A client for Unix sockets."""
addressFamily = socket.AF_UNIX
socketType = socket.SOCK_STREAM
_writeSomeDataBase = tcp.BaseClient
def __init__(self, filename, connector, reactor=None, checkPID = 0):
_SendmsgMixin.__init__(self)
# Normalise the filename using UNIXAddress
filename = address.UNIXAddress(filename).name
self.connector = connector
self.realAddress = self.addr = filename
if checkPID and not lockfile.isLocked(filename + b".lock"):
self._finishInit(None, None, error.BadFileError(filename), reactor)
self._finishInit(self.doConnect, self.createInternetSocket(),
None, reactor)
def getPeer(self):
return address.UNIXAddress(self.addr)
def getHost(self):
return address.UNIXAddress(None)
class Connector(base.BaseConnector):
def __init__(self, address, factory, timeout, reactor, checkPID):
base.BaseConnector.__init__(self, factory, timeout, reactor)
self.address = address
self.checkPID = checkPID
def _makeTransport(self):
return Client(self.address, self, self.reactor, self.checkPID)
def getDestination(self):
return address.UNIXAddress(self.address)
@implementer(interfaces.IUNIXDatagramTransport)
class DatagramPort(_UNIXPort, udp.Port):
"""
Datagram UNIX port, listening for packets.
"""
addressFamily = socket.AF_UNIX
def __init__(self, addr, proto, maxPacketSize=8192, mode=0o666, reactor=None):
"""Initialize with address to listen on.
"""
udp.Port.__init__(self, addr, proto, maxPacketSize=maxPacketSize, reactor=reactor)
self.mode = mode
def __repr__(self):
protocolName = reflect.qual(self.protocol.__class__,)
if hasattr(self, 'socket'):
return '<%s on %r>' % (protocolName, self.port)
else:
return '<%s (not listening)>' % (protocolName,)
def _bindSocket(self):
log.msg("%s starting on %s"%(self.protocol.__class__, repr(self.port)))
try:
skt = self.createInternetSocket() # XXX: haha misnamed method
if self.port:
skt.bind(self.port)
except socket.error as le:
raise error.CannotListenError(None, self.port, le)
if self.port and _inFilesystemNamespace(self.port):
# Make the socket readable and writable to the world.
os.chmod(self.port, self.mode)
self.connected = 1
self.socket = skt
self.fileno = self.socket.fileno
def write(self, datagram, address):
"""Write a datagram."""
try:
return self.socket.sendto(datagram, address)
except socket.error as se:
no = se.args[0]
if no == EINTR:
return self.write(datagram, address)
elif no == EMSGSIZE:
raise error.MessageLengthError("message too long")
elif no == EAGAIN:
# oh, well, drop the data. The only difference from UDP
# is that UDP won't ever notice.
# TODO: add TCP-like buffering
pass
else:
raise
def connectionLost(self, reason=None):
"""Cleans up my socket.
"""
log.msg('(Port %s Closed)' % repr(self.port))
base.BasePort.connectionLost(self, reason)
if hasattr(self, "protocol"):
# we won't have the attribute in ConnectedPort, in cases
# where there was an error in the connection process
self.protocol.doStop()
self.connected = 0
self.socket.close()
del self.socket
del self.fileno
if hasattr(self, "d"):
self.d.callback(None)
del self.d
def setLogStr(self):
self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
@implementer_only(interfaces.IUNIXDatagramConnectedTransport,
*(implementedBy(base.BasePort)))
class ConnectedDatagramPort(DatagramPort):
"""
A connected datagram UNIX socket.
"""
def __init__(self, addr, proto, maxPacketSize=8192, mode=0o666,
bindAddress=None, reactor=None):
assert isinstance(proto, protocol.ConnectedDatagramProtocol)
DatagramPort.__init__(self, bindAddress, proto, maxPacketSize, mode,
reactor)
self.remoteaddr = addr
def startListening(self):
try:
self._bindSocket()
self.socket.connect(self.remoteaddr)
self._connectToProtocol()
except:
self.connectionFailed(failure.Failure())
def connectionFailed(self, reason):
"""
Called when a connection fails. Stop listening on the socket.
@type reason: L{Failure}
@param reason: Why the connection failed.
"""
self.stopListening()
self.protocol.connectionFailed(reason)
del self.protocol
def doRead(self):
"""
Called when my socket is ready for reading.
"""
read = 0
while read < self.maxThroughput:
try:
data, addr = self.socket.recvfrom(self.maxPacketSize)
read += len(data)
self.protocol.datagramReceived(data)
except socket.error as se:
no = se.args[0]
if no in (EAGAIN, EINTR, EWOULDBLOCK):
return
if no == ECONNREFUSED:
self.protocol.connectionRefused()
else:
raise
except:
log.deferr()
def write(self, data):
"""
Write a datagram.
"""
try:
return self.socket.send(data)
except socket.error as se:
no = se.args[0]
if no == EINTR:
return self.write(data)
elif no == EMSGSIZE:
raise error.MessageLengthError("message too long")
elif no == ECONNREFUSED:
self.protocol.connectionRefused()
elif no == EAGAIN:
# oh, well, drop the data. The only difference from UDP
# is that UDP won't ever notice.
# TODO: add TCP-like buffering
pass
else:
raise
def getPeer(self):
return address.UNIXAddress(self.remoteaddr)
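# Hedged usage sketch (illustrative, guarded so it never runs on import). As
# the module docstring says, end users reach these classes through the
# reactor APIs: reactor.listenUNIX() builds the Port/Server pair defined
# above, and reactor.connectUNIX() builds Connector/Client.
if __name__ == '__main__':  # pragma: no cover
    from twisted.internet import reactor

    class _Echo(protocol.Protocol):
        def dataReceived(self, data):
            # Echo whatever arrives back to the peer.
            self.transport.write(data)

    factory = protocol.ServerFactory()
    factory.protocol = _Echo
    reactor.listenUNIX(b"/tmp/example-echo.sock", factory, wantPID=1)
    reactor.run()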
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceTapConfigurationsOperations(object):
"""NetworkInterfaceTapConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified tap configuration from the NetworkInterface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
tap_configuration_name=tap_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterfaceTapConfiguration"
"""Get the specified tap configuration on a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceTapConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.NetworkInterfaceTapConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
tap_configuration_parameters, # type: "_models.NetworkInterfaceTapConfiguration"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterfaceTapConfiguration"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tap_configuration_parameters, 'NetworkInterfaceTapConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_interface_name, # type: str
tap_configuration_name, # type: str
tap_configuration_parameters, # type: "_models.NetworkInterfaceTapConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkInterfaceTapConfiguration"]
"""Creates or updates a Tap configuration in the specified NetworkInterface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param tap_configuration_name: The name of the tap configuration.
:type tap_configuration_name: str
:param tap_configuration_parameters: Parameters supplied to the create or update tap
configuration operation.
:type tap_configuration_parameters: ~azure.mgmt.network.v2020_06_01.models.NetworkInterfaceTapConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkInterfaceTapConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.NetworkInterfaceTapConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
tap_configuration_name=tap_configuration_name,
tap_configuration_parameters=tap_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceTapConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'tapConfigurationName': self._serialize.url("tap_configuration_name", tap_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations/{tapConfigurationName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkInterfaceTapConfigurationListResult"]
"""Get all Tap configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceTapConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.NetworkInterfaceTapConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceTapConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceTapConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/tapConfigurations'} # type: ignore
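# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file): this operation group
# is normally reached through NetworkManagementClient rather than being
# instantiated directly. The credential setup and resource names below are
# illustrative placeholders, assuming azure-identity and azure-mgmt-network
# are installed.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.network_interface_tap_configurations.begin_delete(
#         resource_group_name="example-rg",
#         network_interface_name="example-nic",
#         tap_configuration_name="example-tap",
#     )
#     poller.result()  # block until the long-running delete completes
# ---------------------------------------------------------------------------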
|
|
import os
import re
import sys
import code
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class MyInterpreter(QWidget):
def __init__(self, parent):
super(MyInterpreter, self).__init__(parent)
hBox = QHBoxLayout()
self.setLayout(hBox)
self.textEdit = PyInterp(self)
# this is how you pass in locals to the interpreter
self.textEdit.initInterpreter(locals())
self.resize(650, 300)
self.centerOnScreen()
hBox.addWidget(self.textEdit)
hBox.setMargin(0)
hBox.setSpacing(0)
def centerOnScreen(self):
# center the widget on the screen
resolution = QDesktopWidget().screenGeometry()
self.move((resolution.width() / 2) - (self.frameSize().width() / 2),
(resolution.height() / 2) - (self.frameSize().height() / 2))
class PyInterp(QTextEdit):
class InteractiveInterpreter(code.InteractiveInterpreter):
def __init__(self, locals):
code.InteractiveInterpreter.__init__(self, locals)
def runIt(self, command):
code.InteractiveInterpreter.runsource(self, command)
def __init__(self, parent):
super(PyInterp, self).__init__(parent)
sys.stdout = self
sys.stderr = self
self.refreshMarker = False # to change back to >>> from ...
self.multiLine = False # code spans more than one line
        self.command = '' # command to be run
self.printBanner() # print sys info
self.marker() # make the >>> or ... marker
self.history = [] # list of commands entered
self.historyIndex = -1
self.interpreterLocals = {}
# setting the color for bg and text
palette = QPalette()
palette.setColor(QPalette.Base, QColor(0, 0, 0))
palette.setColor(QPalette.Text, QColor(0, 255, 0))
self.setPalette(palette)
self.setFont(QFont('Courier', 12))
        # initialize interpreter with self locals
self.initInterpreter(locals())
def printBanner(self):
self.write(sys.version)
self.write(' on ' + sys.platform + '\n')
self.write('PyQt4 ' + PYQT_VERSION_STR + '\n')
msg = 'Type !hist for a history view and !hist(n) history index recall'
self.write(msg + '\n')
def marker(self):
if self.multiLine:
self.insertPlainText('... ')
else:
self.insertPlainText('>>> ')
def initInterpreter(self, interpreterLocals=None):
if interpreterLocals:
# when we pass in locals, we don't want it to be named "self"
# so we rename it with the name of the class that did the passing
# and reinsert the locals back into the interpreter dictionary
selfName = interpreterLocals['self'].__class__.__name__
interpreterLocalVars = interpreterLocals.pop('self')
self.interpreterLocals[selfName] = interpreterLocalVars
else:
self.interpreterLocals = interpreterLocals
self.interpreter = self.InteractiveInterpreter(self.interpreterLocals)
def updateInterpreterLocals(self, newLocals):
className = newLocals.__class__.__name__
self.interpreterLocals[className] = newLocals
def write(self, line):
self.insertPlainText(line)
self.ensureCursorVisible()
def clearCurrentBlock(self):
# block being current row
length = len(self.document().lastBlock().text()[4:])
if length == 0:
return None
else:
# should have a better way of doing this but I can't find it
[self.textCursor().deletePreviousChar() for x in xrange(length)]
return True
def recallHistory(self):
# used when using the arrow keys to scroll through history
self.clearCurrentBlock()
        if self.historyIndex != -1:
self.insertPlainText(self.history[self.historyIndex])
return True
def customCommands(self, command):
if command == '!hist': # display history
self.append('') # move down one line
# vars that are in the command are prefixed with ____CC and deleted
# once the command is done so they don't show up in dir()
backup = self.interpreterLocals.copy()
history = self.history[:]
history.reverse()
for i, x in enumerate(history):
iSize = len(str(i))
delta = len(str(len(history))) - iSize
                line = ' ' * delta + '%i: %s' % (i, x) + '\n'
self.write(line)
self.updateInterpreterLocals(backup)
self.marker()
return True
        if re.match(r'!hist\(\d+\)', command): # recall command from history
backup = self.interpreterLocals.copy()
history = self.history[:]
history.reverse()
index = int(command[6:-1])
self.clearCurrentBlock()
command = history[index]
if command[-1] == ':':
self.multiLine = True
self.write(command)
self.updateInterpreterLocals(backup)
return True
return False
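    # Illustrative console interaction (assumed session, matching the two
    # custom commands above): typing "!hist" prints the numbered command
    # history, while "!hist(3)" clears the current prompt and re-inserts
    # history entry 3 so it can be edited or re-run.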
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
# proper exit
self.interpreter.runIt('exit()')
if event.key() == Qt.Key_Down:
if self.historyIndex == len(self.history):
self.historyIndex -= 1
try:
if self.historyIndex > -1:
self.historyIndex -= 1
self.recallHistory()
else:
self.clearCurrentBlock()
except:
pass
return None
if event.key() == Qt.Key_Up:
try:
if len(self.history) - 1 > self.historyIndex:
self.historyIndex += 1
self.recallHistory()
else:
self.historyIndex = len(self.history)
except:
pass
return None
if event.key() == Qt.Key_Home:
# set cursor to position 4 in current block. 4 because that's where
# the marker stops
blockLength = len(self.document().lastBlock().text()[4:])
lineLength = len(self.document().toPlainText())
position = lineLength - blockLength
textCursor = self.textCursor()
textCursor.setPosition(position)
self.setTextCursor(textCursor)
return None
if event.key() in [Qt.Key_Left, Qt.Key_Backspace]:
# don't allow deletion of marker
if self.textCursor().positionInBlock() == 4:
return None
if event.key() in [Qt.Key_Return, Qt.Key_Enter]:
# set cursor to end of line to avoid line splitting
textCursor = self.textCursor()
position = len(self.document().toPlainText())
textCursor.setPosition(position)
self.setTextCursor(textCursor)
line = str(self.document().lastBlock().text())[4:] # remove marker
            line = line.rstrip()
self.historyIndex = -1
if self.customCommands(line):
return None
else:
try:
line[-1]
self.haveLine = True
if line[-1] == ':':
self.multiLine = True
self.history.insert(0, line)
except:
self.haveLine = False
if self.haveLine and self.multiLine: # multi line command
self.command += line + '\n' # + command and line
self.append('') # move down one line
self.marker() # handle marker style
return None
if self.haveLine and not self.multiLine: # one line command
self.command = line # line is the command
self.append('') # move down one line
self.interpreter.runIt(self.command)
self.command = '' # clear command
self.marker() # handle marker style
return None
if self.multiLine and not self.haveLine: # multi line done
self.append('') # move down one line
self.interpreter.runIt(self.command)
self.command = '' # clear command
self.multiLine = False # back to single line
self.marker() # handle marker style
return None
if not self.haveLine and not self.multiLine: # just enter
self.append('')
self.marker()
return None
return None
# allow all other key events
super(PyInterp, self).keyPressEvent(event)
if __name__ == '__main__':
app = QApplication(sys.argv)
win = MyInterpreter(None)
win.show()
sys.exit(app.exec_())
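# ---------------------------------------------------------------------------
# Hedged embedding sketch (an assumption, not part of the original widget):
# the console can be dropped into any PyQt4 window, and further objects can
# be exposed to the interpreter through updateInterpreterLocals(). The name
# "MainWindow" is illustrative only.
#
#     class MainWindow(QMainWindow):
#         def __init__(self):
#             super(MainWindow, self).__init__()
#             console = MyInterpreter(self)      # console.textEdit is the PyInterp
#             self.setCentralWidget(console)
#             # expose this window to the console; it becomes reachable there
#             # under its class name, i.e. as 'MainWindow'
#             console.textEdit.updateInterpreterLocals(self)
# ---------------------------------------------------------------------------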
|
|
#*******************************************************************************
#
# Filename : crablogger.py
# Description : class for managing crab outputfile logging
# Author : Yi-Mu "Enoch" Chen [ ensc@hep1.phys.ntu.edu.tw ]
#
#*******************************************************************************
import sys, os
import subprocess
import fnmatch
import logformat
from datetime import datetime
import time
class CrabLogger(object):
def __init__(self, site, dirfln, primary, crabjob, output):
self.site = site
self.dirfln = dirfln
self.primary = primary
self.crabjob = crabjob
self.output = output + '/' + crabjob
self.timestamp = None
self.localtable = logformat.LogTable()
self.remotetable = None
        ## Testing that the crab job path exists remotely and the local output is usable
        print ">> Begin Initializing..."
if not self.checkremotedir():
raise Exception('requested crab job path not found at remote location')
if not self.checklocaldir():
raise Exception('Error in specifying output!')
> Finished">
        print ">> Finished Initializing!"
def getoutput(self, compare_with_remote=False):
> Starting comparing">
        print ">> Starting comparison...."
retrievelist = []
if os.path.isfile( self.logfilename() ):
self.localtable.load( self.logfilename() )
else:
self.make_remotetable()
self.localtable = self.remotetable
self.localtable.write( self.logfilename() )
if compare_with_remote and not self.remotetable:
self.make_remotetable()
retrievelist.extend( self.compare_remote() )
self.localtable.write( self.logfilename() )
retrievelist.extend( self.compare_local() )
retrievelist = sorted(set( retrievelist )) ## Unique elements only
if len(retrievelist) == 0 :
print ">> Everything done! Nothing to retrieve!"
return
for remotefile in retrievelist:
print ">> Start retrieving...."
self.retrievesingle(remotefile)
def checkremotedir(self):
out = self.listremote(self.remotecrabbase())
for outline in out:
outline = os.path.basename( outline )
if outline > self.timestamp :
self.timestamp = outline
return True
def checklocaldir(self):
if os.path.isdir(self.output):
print "Using existing directory..."
return True
if not os.path.exists(self.output):
print "Creating new directory!"
os.system("mkdir -p " + self.output)
return True
        elif os.path.isfile(self.output):
            print "Error! output already exists as a file!"
return False
return False
def remotecrabbase(self):
return "{}/{}/crab_{}/".format( self.dirfln, self.primary, self.crabjob )
def logfilename(self):
return self.output + "/log.txt"
def make_remotetable(self):
print "Generating table from remote location, might take some time..."
self.remotetable = logformat.LogTable() ## scraping everything
self.remotetable.timestamp = self.timestamp
for midpath in self.listremote( self.remotecrabbase() + self.timestamp ):
for rootfile in fnmatch.filter( self.listremote( midpath ) , '*.root' ):
path = rootfile
timestamp,size = self.getfileinfo( rootfile )
self.remotetable.setentry( path, timestamp, size )
def listremote(self,query):
"""Using xrdfs ls"""
print query
proc = subprocess.Popen(["xrdfs", self.site, "ls", query ], stdout=subprocess.PIPE, stderr=subprocess.PIPE )
out, err = proc.communicate()
if err :
print "Error detected when calling xrdfs!"
print "Have you setup your permission? (voms-proxy-init -voms cms -valid 192:0)"
print "Full command >> xrdfs", self.site, "ls", query
raise Exception("Error in input!")
return out.split()
def getfileinfo(self, remotefile):
"""
Using xrdfs stat command
Output of the command is in the format of:
> Path: <somepath>
> Id: <someid>
> Size: <some size in bytes>
> MTime: <time in the format: 2016-07-10 23:52:41>
> Flags: <some flag>16 (IsReadable)
"""
proc = subprocess.Popen(["xrdfs", self.site, "stat", remotefile ], stdout=subprocess.PIPE, stderr=subprocess.PIPE )
out,err = proc.communicate()
out = out.split('\n')
size_line = out[2]
time_line = out[3]
size = size_line.split()[1].strip()
time_inputstring = time_line.split(': ')[1].strip()
time_obj = datetime.strptime( time_inputstring , "%Y-%m-%d %H:%M:%S")
time_outputstring = time_obj.strftime("%y%m%d_%H%M%S" )
return time_outputstring, size
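    # Illustrative parsing example (sample values are assumptions): for an
    # "xrdfs stat" output such as
    #     Path:  /store/.../ntuple_1.root
    #     Id:    1234
    #     Size:  52428800
    #     MTime: 2016-07-10 23:52:41
    # the method above returns ("160710_235241", "52428800").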
def compare_local(self):
filelist = []
for remotepath,pair in self.localtable.entrylist.iteritems() :
localpath = self.output + '/' + os.path.basename(remotepath)
if not os.path.isfile( localpath ):
print "Adding missing file ", os.path.basename(remotepath)
filelist.append( remotepath )
else:
remotesize = pair[1]
localsize = str(os.path.getsize(localpath))
if localsize != remotesize:
print "Adding mismatch file ", os.path.basename(remotepath),
print "(local:{}/remote:{})".format( localsize , remotesize )
filelist.append( remotepath )
return filelist
def compare_remote(self):
filelist = []
        ## Force refresh everything if a new time stamp is detected
if self.remotetable.timestamp != self.localtable.timestamp :
print "Remote has a different time stamp! Dropping everything in local log..."
self.localtable = self.remotetable
filelist = [x for x in self.localtable.entrylist ]
return filelist
for remotepath in self.remotetable.entrylist :
remotetime = self.remotetable.getfiletime( remotepath )
remotesize = self.remotetable.getfilesize( remotepath )
if remotepath not in self.localtable.entrylist:
print "Adding remote's new file", os.path.basename(remotepath)
filelist.append(remotepath)
self.localtable.setentry( remotepath, remotetime, remotesize )
elif remotetime != self.localtable.getfiletime(remotepath):
print "Adding remote's updated file", os.path.basename(remotepath)
filelist.append(remotepath)
self.localtable.setentry( remotepath, remotetime, remotesize )
elif remotesize != self.localtable.getfilesize(remotepath):
print "Adding remote's updated file", os.path.basename(remotepath)
filelist.append(remotepath)
self.localtable.setentry( remotepath, remotetime, remotesize )
return filelist
def retrievesingle(self,remotepath):
cmd = "xrdcp -f root://{0}//{1} {2}/{3}".format(
self.site,
remotepath,
self.output,
os.path.basename(remotepath)
)
print "Retrieving file ", os.path.basename(remotepath)
print cmd
os.system(cmd)
if __name__ == "__main__":
mylogger = CrabLogger(
'eoscms.cern.ch',
'/store/group/phys_b2g/BprimeKit_Ntuples_CMSSW_80X/',
'SingleElectron',
'BPK_80X_SingleElectron_Run2016B-PromptReco-v2',
'/store/yichen/bpk_ntuples/80X/'
)
mylogger.getoutput()
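# ---------------------------------------------------------------------------
# Hedged usage sketch (site and paths below are placeholders, not real
# datasets): a periodic sync that also re-walks the remote listing can be run
# with compare_with_remote=True, at the cost of one extra "xrdfs ls" pass.
#
#     logger = CrabLogger(
#         'eoscms.cern.ch',
#         '/store/group/<group>/<ntuple-area>/',
#         '<primary-dataset>',
#         '<crab-job-name>',
#         '/store/<user>/<output-area>/',
#     )
#     logger.getoutput(compare_with_remote=True)
# ---------------------------------------------------------------------------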
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of block device information and mapping.
This module contains helper methods for interpreting the block
device information and determining the suitable mapping to
guest devices and libvirt XML.
Throughout these methods there are a number of standard
variables / types used
 * 'mapping': a dict containing the storage device mapping.
For the default disk types it will contain the following
keys & values:
'disk' -> disk_info
'disk.rescue' -> disk_info
'disk.local' -> disk_info
'disk.swap' -> disk_info
'disk.config' -> disk_info
   If any of the default disks are overridden by the block
device info mappings, the hash value will be None
For any ephemeral device there will also be a dict entry
'disk.eph$NUM' -> disk_info
For any volume device there will also be a dict entry:
$path -> disk_info
Finally a special key will refer to the root device:
'root' -> disk_info
 * 'disk_info': a dict describing a disk's configuration
   It contains the following 3 keys
      'bus' (disk bus), 'dev' (disk dev), 'type' (device type)
* 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc)
* 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde' etc
* 'device_type': type of device eg 'disk', 'cdrom', 'floppy'
"""
from nova import block_device
from nova.compute import instance_types
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import configdrive
from nova.virt import driver
LOG = logging.getLogger(__name__)
def has_disk_dev(mapping, disk_dev):
"""Determine if a disk device name has already been used.
Looks at all the keys in mapping to see if any
corresponding disk_info tuple has a device name
matching disk_dev
Returns True if the disk_dev is in use."""
for disk in mapping:
info = mapping[disk]
if info['dev'] == disk_dev:
return True
return False
def get_dev_prefix_for_disk_bus(disk_bus):
"""Determine the dev prefix for a disk bus.
Determine the dev prefix to be combined
with a disk number to fix a disk_dev.
eg 'hd' for 'ide' bus can be used to
form a disk dev 'hda'
Returns the dev prefix or raises an
exception if the disk bus is unknown."""
if disk_bus == "ide":
return "hd"
elif disk_bus == "virtio":
return "vd"
elif disk_bus == "xen":
# Two possible mappings for Xen, xvda or sda
        # which are interchangeable, so we pick sda
return "sd"
elif disk_bus == "scsi":
return "sd"
elif disk_bus == "usb":
return "sd"
elif disk_bus == "uml":
return "ubd"
elif disk_bus == "lxc":
return None
else:
raise exception.NovaException(
_("Unable to determine disk prefix for %s") %
disk_bus)
def get_dev_count_for_disk_bus(disk_bus):
"""Determine the number disks supported.
Determine how many disks can be supported in
a single VM for a particular disk bus.
Returns the number of disks supported."""
if disk_bus == "ide":
return 4
else:
return 26
def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
"""Identify a free disk dev name for a bus.
Determines the possible disk dev names for
the bus, and then checks them in order until
it identifies one that is not yet used in the
disk mapping. If 'last_device' is set, it will
only consider the last available disk dev name.
Returns the chosen disk_dev name, or raises an
exception if none is available.
"""
dev_prefix = get_dev_prefix_for_disk_bus(bus)
if dev_prefix is None:
return None
max_dev = get_dev_count_for_disk_bus(bus)
if last_device:
devs = [max_dev - 1]
else:
devs = range(max_dev)
for idx in devs:
disk_dev = dev_prefix + chr(ord('a') + idx)
if not has_disk_dev(mapping, disk_dev):
return disk_dev
raise exception.NovaException(
_("No free disk device names for prefix '%s'"),
dev_prefix)
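# Illustrative example (assumed mapping, for clarity only): with two devices
# already assigned on the virtio bus, the next free name is returned.
#
#     mapping = {'root':       {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
#                'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}}
#     find_disk_dev_for_disk_bus(mapping, 'virtio')   # -> 'vdc'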
def is_disk_bus_valid_for_virt(virt_type, disk_bus):
valid_bus = {
'qemu': ['virtio', 'scsi', 'ide', 'usb'],
'kvm': ['virtio', 'scsi', 'ide', 'usb'],
'xen': ['xen', 'ide'],
'uml': ['uml'],
'lxc': ['lxc'],
}
if virt_type not in valid_bus:
raise exception.UnsupportedVirtType(virt=virt_type)
return disk_bus in valid_bus[virt_type]
def get_disk_bus_for_device_type(virt_type,
image_meta=None,
device_type="disk"):
"""Determine the best disk bus to use for a device type.
Considering the currently configured virtualization
type, return the optimal disk_bus to use for a given
device type. For example, for a disk on KVM it will
return 'virtio', while for a CDROM it will return 'ide'
Returns the disk_bus, or returns None if the device
type is not supported for this virtualization"""
# Prefer a disk bus set against the image first of all
if image_meta:
key = "hw_" + device_type + "_bus"
disk_bus = image_meta.get('properties', {}).get(key)
if disk_bus is not None:
if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
raise exception.UnsupportedHardware(model=disk_bus,
virt=virt_type)
return disk_bus
# Otherwise pick a hypervisor default disk bus
if virt_type == "uml":
if device_type == "disk":
return "uml"
elif virt_type == "lxc":
return "lxc"
elif virt_type == "xen":
if device_type == "cdrom":
return "ide"
elif device_type == "disk":
return "xen"
elif virt_type in ("qemu", "kvm"):
if device_type == "cdrom":
return "ide"
elif device_type == "disk":
return "virtio"
return None
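# Illustrative example (the image metadata below is an assumption): an image
# can force a bus via its hw_<device_type>_bus property, which takes priority
# over the hypervisor default chosen above.
#
#     meta = {'properties': {'hw_disk_bus': 'scsi'}}
#     get_disk_bus_for_device_type('kvm', meta, 'disk')    # -> 'scsi'
#     get_disk_bus_for_device_type('kvm', None, 'cdrom')   # -> 'ide'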
def get_disk_bus_for_disk_dev(virt_type, disk_dev):
"""Determine the disk bus for a disk dev.
    Given a disk dev like 'hda', 'sdf', 'xvdb', etc
guess what the most appropriate disk bus is for
the currently configured virtualization technology
Returns the disk bus, or raises an Exception if
the disk dev prefix is unknown."""
if disk_dev[:2] == 'hd':
return "ide"
elif disk_dev[:2] == 'sd':
# Reverse mapping 'sd' is not reliable
# there are many possible mappings. So
# this picks the most likely mappings
if virt_type == "xen":
return "xen"
else:
return "scsi"
elif disk_dev[:2] == 'vd':
return "virtio"
elif disk_dev[:3] == 'xvd':
return "xen"
elif disk_dev[:3] == 'ubd':
return "uml"
else:
raise exception.NovaException(
_("Unable to determine disk bus for '%s'") %
disk_dev[:1])
def get_next_disk_info(mapping, disk_bus,
device_type='disk',
last_device=False):
"""Determine the disk info for the next device on disk_bus.
Considering the disks already listed in the disk mapping,
determine the next available disk dev that can be assigned
for the disk bus.
Returns the disk_info for the next available disk."""
disk_dev = find_disk_dev_for_disk_bus(mapping,
disk_bus,
last_device)
return {'bus': disk_bus,
'dev': disk_dev,
'type': device_type}
def get_eph_disk(ephemeral):
return 'disk.eph' + str(ephemeral['num'])
def get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
block_device_info=None,
image_meta=None, rescue=False):
"""Determine how to map default disks to the virtual machine.
This is about figuring out whether the default 'disk',
'disk.local', 'disk.swap' and 'disk.config' images have
    been overridden by the block device mapping.
Returns the guest disk mapping for the devices."""
inst_type = instance_types.extract_instance_type(instance)
mapping = {}
if virt_type == "lxc":
# NOTE(zul): This information is not used by the libvirt driver
# however we need to populate mapping so the image can be
# created when the instance is started. This can
# be removed when we convert LXC to use block devices.
root_disk_bus = disk_bus
root_device_type = 'disk'
root_info = get_next_disk_info(mapping,
root_disk_bus,
root_device_type)
mapping['root'] = root_info
mapping['disk'] = root_info
return mapping
if rescue:
rescue_info = get_next_disk_info(mapping,
disk_bus)
mapping['disk.rescue'] = rescue_info
mapping['root'] = rescue_info
os_info = get_next_disk_info(mapping,
disk_bus)
mapping['disk'] = os_info
return mapping
if image_meta and image_meta.get('disk_format') == 'iso':
root_disk_bus = cdrom_bus
root_device_type = 'cdrom'
else:
root_disk_bus = disk_bus
root_device_type = 'disk'
root_device_name = driver.block_device_info_get_root(block_device_info)
if root_device_name is not None:
root_device = block_device.strip_dev(root_device_name)
root_info = {'bus': get_disk_bus_for_disk_dev(virt_type,
root_device),
'dev': root_device,
'type': root_device_type}
else:
root_info = get_next_disk_info(mapping,
root_disk_bus,
root_device_type)
mapping['root'] = root_info
if not block_device.volume_in_mapping(root_info['dev'],
block_device_info):
mapping['disk'] = root_info
eph_info = get_next_disk_info(mapping,
disk_bus)
ephemeral_device = False
if not (block_device.volume_in_mapping(eph_info['dev'],
block_device_info) or
0 in [eph['num'] for eph in
driver.block_device_info_get_ephemerals(
block_device_info)]):
if instance['ephemeral_gb'] > 0:
ephemeral_device = True
if ephemeral_device:
mapping['disk.local'] = eph_info
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
disk_dev = block_device.strip_dev(eph['device_name'])
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
mapping[get_eph_disk(eph)] = {'bus': disk_bus,
'dev': disk_dev,
'type': 'disk'}
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
disk_dev = block_device.strip_dev(swap['device_name'])
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
mapping['disk.swap'] = {'bus': disk_bus,
'dev': disk_dev,
'type': 'disk'}
elif inst_type['swap'] > 0:
swap_info = get_next_disk_info(mapping,
disk_bus)
if not block_device.volume_in_mapping(swap_info['dev'],
block_device_info):
mapping['disk.swap'] = swap_info
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
mapping[vol['mount_device']] = {'bus': disk_bus,
'dev': disk_dev,
'type': 'disk'}
if configdrive.enabled_for(instance):
config_info = get_next_disk_info(mapping,
disk_bus,
last_device=True)
mapping['disk.config'] = config_info
return mapping
def get_disk_info(virt_type, instance, block_device_info=None,
image_meta=None, rescue=False):
"""Determine guest disk mapping info.
This is a wrapper around get_disk_mapping, which
also returns the chosen disk_bus and cdrom_bus.
The returned data is in a dict
- disk_bus: the bus for harddisks
- cdrom_bus: the bus for CDROMs
- mapping: the disk mapping
    Returns the disk mapping info."""
disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, "disk")
cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, "cdrom")
mapping = get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
block_device_info,
image_meta, rescue)
return {'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
'mapping': mapping}
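# ---------------------------------------------------------------------------
# Hedged usage sketch (the 'instance' and the expected values below are
# illustrative assumptions, not real driver data): the libvirt driver would
# typically call get_disk_info() and read back the chosen buses plus the
# per-device mapping.
#
#     info = get_disk_info('kvm', instance, block_device_info=None,
#                          image_meta=None, rescue=False)
#     # e.g. info['disk_bus'] == 'virtio' and info['cdrom_bus'] == 'ide'
#     root = info['mapping']['root']  # {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
# ---------------------------------------------------------------------------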
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.