#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import sys
from operator import lt, gt, eq, le, ge
from os.path import (
abspath,
dirname,
join,
)
from distutils.version import StrictVersion
from setuptools import (
Extension,
find_packages,
setup,
)
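# LazyCythonizingList defers importing Cython and numpy (and running
# cythonize()) until setuptools first iterates over or indexes ext_modules.
# That lets pre_setup() below pip-install Cython and numpy before the
# extension modules actually need to be built.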
class LazyCythonizingList(list):
cythonized = False
def lazy_cythonize(self):
if self.cythonized:
return
self.cythonized = True
from Cython.Build import cythonize
from numpy import get_include
self[:] = cythonize(
[
Extension(*ext_args, include_dirs=[get_include()])
for ext_args in self
]
)
def __iter__(self):
self.lazy_cythonize()
return super(LazyCythonizingList, self).__iter__()
def __getitem__(self, num):
self.lazy_cythonize()
return super(LazyCythonizingList, self).__getitem__(num)
ext_modules = LazyCythonizingList([
('zipline.assets._assets', ['zipline/assets/_assets.pyx']),
('zipline.lib.adjusted_array', ['zipline/lib/adjusted_array.pyx']),
('zipline.lib.adjustment', ['zipline/lib/adjustment.pyx']),
(
'zipline.data.ffc.loaders._us_equity_pricing',
['zipline/data/ffc/loaders/_us_equity_pricing.pyx']
),
])
STR_TO_CMP = {
'<': lt,
'<=': le,
'=': eq,
'==': eq,
'>': gt,
'>=': ge,
}
def _filter_requirements(lines_iter):
for line in lines_iter:
line = line.strip()
if not line or line.startswith('#'):
continue
# pip install -r understands line with ;python_version<'3.0', but
# whatever happens inside extras_requires doesn't. Parse the line
# manually and conditionally add it if needed.
if ';' not in line:
yield line
continue
requirement, version_spec = line.split(';')
try:
groups = re.match(
"(python_version)([<>=]{1,2})(')([0-9\.]+)(')(.*)",
version_spec,
).groups()
comp = STR_TO_CMP[groups[1]]
version_spec = StrictVersion(groups[3])
except Exception as e:
# My kingdom for a 'raise from'!
raise AssertionError(
"Couldn't parse requirement line; '%s'\n"
"Error was:\n"
"%r" % (line, e)
)
        sys_version = StrictVersion('.'.join(map(str, sys.version_info[:3])))
        if comp(sys_version, version_spec):
yield requirement
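# Example (hypothetical requirement lines): given
#   numpy==1.9.2
#   futures==3.0.3;python_version<'3.0'
# _filter_requirements yields "numpy==1.9.2" unconditionally, and yields
# "futures==3.0.3" only when the running interpreter is older than 3.0.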
def read_requirements(path):
"""
Read a requirements.txt file, expressed as a path relative to Zipline root.
"""
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
return list(_filter_requirements(f.readlines()))
def install_requires():
return read_requirements('etc/requirements.txt')
def extras_requires():
dev_reqs = read_requirements('etc/requirements_dev.txt')
talib_reqs = ['TA-Lib==0.4.9']
return {
'dev': dev_reqs,
'talib': talib_reqs,
'all': dev_reqs + talib_reqs,
}
def module_requirements(requirements_path, module_names):
module_names = set(module_names)
found = set()
module_lines = []
parser = re.compile("([^=<>]+)([<=>]{1,2})(.*)")
for line in read_requirements(requirements_path):
match = parser.match(line)
if match is None:
raise AssertionError("Could not parse requirement: '%s'" % line)
groups = match.groups()
name = groups[0]
if name in module_names:
found.add(name)
module_lines.append(line)
if found != module_names:
raise AssertionError(
"No requirements found for %s." % module_names - found
)
return module_lines
def pre_setup():
if not set(sys.argv) & {'install', 'develop', 'egg_info', 'bdist_wheel'}:
return
try:
import pip
if StrictVersion(pip.__version__) < StrictVersion('7.1.0'):
raise AssertionError(
"Zipline installation requires pip>=7.1.0, but your pip "
"version is {version}. \n"
"You can upgrade your pip with "
"'pip install --upgrade pip'.".format(
version=pip.__version__,
)
)
except ImportError:
raise AssertionError("Zipline installation requires pip")
required = ('Cython', 'numpy')
for line in module_requirements('etc/requirements.txt', required):
pip.main(['install', line])
pre_setup()
setup(
name='zipline',
version='0.8.0rc1',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=find_packages('.', include=['zipline', 'zipline.*']),
ext_modules=ext_modules,
scripts=['scripts/run_algo.py'],
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=install_requires(),
extras_require=extras_requires(),
url="http://zipline.io"
)
|
|
from __future__ import print_function
from cmd3.console import Console
from cmd3.shell import command
from cloudmesh_inventory.command_inventory import inventory
import hostlist
from cloudmesh_base.locations import config_file
# TODO: delete row
# TODO: add columns
# TODO: ATTRIBUTE=VALUE
class cm_shell_inventory:
def activate_cm_shell_inventory(self):
self.register_command_topic('system', 'inventory')
@command
def do_inventory(self, args, arguments):
"""
::
Usage:
inventory add NAMES [--label=LABEL]
[--service=SERVICES]
[--project=PROJECT]
[--owners=OWNERS]
[--comment=COMMENT]
[--cluster=CLUSTER]
[--ip=IP]
inventory set NAMES for ATTRIBUTE to VALUES
inventory delete NAMES
inventory clone NAMES from SOURCE
inventory list [NAMES] [--format=FORMAT] [--columns=COLUMNS]
inventory info
Arguments:
NAMES Name of the resources (example i[10-20])
FORMAT The format of the output is either txt,
yaml, dict, table [default: table].
OWNERS a comma separated list of owners for this resource
LABEL a unique label for this resource
SERVICE a string that identifies the service
PROJECT a string that identifies the project
SOURCE a single host name to clone from
COMMENT a comment
Options:
-v verbose mode
Description:
add -- adds a resource to the resource inventory
list -- lists the resources in the given format
delete -- deletes objects from the table
clone -- copies the content of an existing object
                       and creates new ones with it
set -- sets for the specified objects the attribute
to the given value or values. If multiple values
                       are used, the values are assigned to the
objects in order. See examples
            map     -- allows to set attributes on a set of objects
with a set of values
Examples:
cm inventory add x[0-3] --service=openstack
adds hosts x0, x1, x2, x3 and puts the string
openstack into the service column
                cm inventory list
lists the repository
                cm inventory set x[3-4] for temperature to 32
sets for the resources x3, x4 the value of the
temperature to 32
                cm inventory set x[7-8] for ip to 128.0.0.[0-1]
sets the value of x7 to 128.0.0.0
sets the value of x8 to 128.0.0.1
                cm inventory clone x[5-6] from x3
clones the values for x5, x6 from x3
"""
print(arguments)
filename = config_file("/cloudmesh_inventory.yaml")
sorted_keys = True
if arguments["info"]:
i = inventory()
i.read()
i.info()
elif arguments["list"]:
i = inventory()
i.read()
if arguments["--columns"]:
order = arguments["--columns"].split(",")
else:
order = i.order
print(i.list(format="table", order=order))
elif arguments["NAMES"] is None:
Console.error("Please specify a host name")
#elif arguments["set"]:
# hosts = hostlist.expand_hostlist(arguments["NAMES"])
# i = inventory()
# i.read()
# element = {}
# for attribute in i.order:
# try:
# attribute = arguments["ATTRIBUTE"]
# value = arguments["VALUE"]
# if value is not None:
# element[attribute] = value
# except:
# pass
# element['host'] = arguments["NAMES"]
# i.add(**element)
# print (i.list(format="table"))
elif arguments["set"]:
hosts = hostlist.expand_hostlist(arguments["NAMES"])
values = hostlist.expand_hostlist(arguments["VALUES"])
if len(values) == 1:
values = values * len(hosts)
print (hosts)
print (values)
attribute = arguments["ATTRIBUTE"]
            if len(hosts) != len(values):
                Console.error("Number of names {:} != number of values {:}".format(len(hosts), len(values)))
                return
i = inventory()
i.read()
for index in range(0,len(hosts)):
host = hosts[index]
value = values[index]
object = { 'host': host,
attribute : value }
i.add(**object)
print (i.list(format="table"))
elif arguments["add"]:
hosts = hostlist.expand_hostlist(arguments["NAMES"])
i = inventory()
i.read()
element = {}
for attribute in i.order:
try:
value = arguments["--" + attribute]
if value is not None:
element[attribute] = value
except:
pass
element['host'] = arguments["NAMES"]
i.add(**element)
print (i.list(format="table"))
elif arguments["delete"]:
hosts = hostlist.expand_hostlist(arguments["NAMES"])
i = inventory()
i.read()
for host in hosts:
del i.data[host]
i.save()
elif arguments["clone"]:
hosts = hostlist.expand_hostlist(arguments["NAMES"])
source = arguments["SOURCE"]
i = inventory()
i.read()
if source in i.data:
for host in hosts:
i.data[host] = dict(i.data[source])
i.save()
else:
Console.error("The source {:} does not exist".format(source))
if __name__ == '__main__':
command = cm_shell_inventory()
command.do_system("iu.edu")
|
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from .common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
"""Tests the DBSCAN algorithm with a similarity array."""
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
"""Tests the DBSCAN algorithm with a feature vector array."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10, random_state=0)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10,
random_state=0)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN().fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
"""Tests the DBSCAN algorithm with a callable metric."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
"""Tests the DBSCAN algorithm with balltree for neighbor calculation."""
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
"""DBSCAN.fit should accept a list of lists."""
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=5)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=5)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=5)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=5)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=5)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=5)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5, 0.1],
eps=1.5, min_samples=5)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=5)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=5)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight, random_state=42)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated, random_state=42)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed', random_state=42)
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN(random_state=42).fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN(random_state=42)
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
|
|
#!/usr/bin/env python
# irc.py - IRC protocol library
import random
import sys
"""
irc - IRC protocol library
Unlike most IRC protocol implementations, this library is solely concerned with
speaking the IRC protocol; it does not do any networking of its own. This module
provides five classes: "IRC", "Event", "Channel", "User", and "Server".
The IRC class maintains a single session's IRC state, and consumes input lines
to produce output lines and events; the Event class represents a single event.
Channel, User, and Server represent entities in the IRC protocol.
"""
class ProtocolError(Exception):
pass
class Event:
"""
Represents a single IRC event.
"""
def __init__(self, kind, src, dest=None, target=None, args=None): # {{{
self._kind = kind
self._src = src
self._dest = dest
self._target = target
self._args = args
# }}}
def kind(self): # {{{
return self._kind
# }}}
def src(self): # {{{
return self._src
# }}}
def dest(self): # {{{
return self._dest
# }}}
def target(self): # {{{
return self._target
# }}}
def args(self): # {{{
return self._args
# }}}
def __str__(self): # {{{
return '<Event kind=%s src=%s dest=%s target=%s args=%s>' % (
self._kind, self._src, self._dest, self._target, self._args)
# }}}
class Channel:
"""
Represents a single IRC channel.
"""
def _add_user(self, user, modes=''): # {{{
self._users[user] = modes
# }}}
def __init__(self, name): # {{{
self._name = name
self._users = {}
# }}}
def name(self): # {{{
return self._name
# }}}
def is_user(self): # {{{
return False
# }}}
def is_server(self): # {{{
return False
# }}}
def is_channel(self): # {{{
return True
# }}}
def __str__(self): # {{{
return '<Channel "%s">' % self._name
# }}}
def users(self): # {{{
        return self._users
# }}}
class User:
"""
Represents a single IRC user.
"""
def _update(self, user, host): # {{{
self._user = user
self._host = host
# }}}
def __init__(self, nick, user, host): # {{{
self._nick = nick
self._user = user
self._host = host
# }}}
def nick(self): # {{{
return self._nick
# }}}
def user(self): # {{{
return self._user
# }}}
def host(self): # {{{
return self._host
# }}}
def is_user(self): # {{{
return True
# }}}
def is_server(self): # {{{
return False
# }}}
def is_channel(self): # {{{
return False
# }}}
def __str__(self): # {{{
return '<User nick="%s">' % self._nick
# }}}
class Server:
"""
Represents a single IRC server.
"""
def __init__(self, name): # {{{
self._name = name
# }}}
def name(self): # {{{
return self._name
# }}}
def is_user(self): # {{{
return False
# }}}
def is_server(self): # {{{
return True
# }}}
def is_channel(self): # {{{
return False
# }}}
def __str__(self): # {{{
return '<Server "%s">' % self._name
# }}}
class IRC:
"""
Represents a single IRC client session. Fed input lines; produces output
lines (to be sent to the server) and events. Other methods can be used to
send commands to the IRC server, which will appear in the output line
buffer.
"""
def _genstr(self, template): # {{{
while 'X' in template:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
c = self._random.choice(chars)
template = template.replace('X', c, 1)
print template
return template
# }}}
def _add_user(self, user): # {{{
        assert(user.nick().lower() not in self._users)
        self._users[user.nick().lower()] = user
# }}}
def _parsesource(self, src): # {{{
if src[0] == ':':
src = src[1:]
if '!' in src and '@' in src:
# Long-form nick!user@host
(nick, userathost) = src.split('!', 1)
(user, host) = userathost.split('@', 1)
u = self.user(nick)
if not u:
u = User(nick, user, host)
# Note that we don't add the new user to our dictionary - we
# can't necessarily see them, and if we can't see users, we
# don't know when to clean out the old reference to them.
# Messages that indicate that we can see users (channel joins)
# add the source themselves if need be.
if u.user() != user or u.host() != host:
# If we see a user@host that doesn't match the existing
# user@host for an old user, update it here.
u._update(user, host)
return u
if '!' not in src and '@' not in src and '.' in src:
# Dotted server name
s = self.server(src)
if not s:
s = Server(src)
return s
# Short-form user reference (just nick). Never actually seen in the
# wild, but the RFC allows it...
u = self.user(src)
if not u:
u = User(src, None, None)
return u
# }}}
def _parsedest(self, dest): # {{{
if dest[0] in self._opts['chantypes']:
c = self.channel(dest)
if not c:
c = Channel(dest)
# Don't learn about c here. We might be seeing an event for a
# channel we're not in (!?), although that'd be pretty messed
# up.
return c
elif dest == '*':
# Special case - some ircds send this as the dest of pre-auth
# notices. Not a valid nickname, in any case.
return None
else:
u = self.user(dest)
if not u:
u = User(dest, None, None)
return u
# }}}
def _parse_ping(self, line): # {{{
(cmd, rest) = line.split(' ', 1)
self._add_output('PONG %s' % rest)
# }}}
def _parse_001(self, src, cmd, rest): # {{{
# <nick> :<message>
# This tells us our own nickname, and src tells us our remote server.
if not src.is_server():
raise ProtocolError('001 from non-server: %s' % src)
(nick, rest) = rest.split(' ', 1)
self._server = src
u = User(nick, None, None)
print 'Learned ourselves: %s' % u
self._add_user(u)
self._self = u
self._add_event(Event('connected', src, u))
# }}}
def _parse_005(self, src, cmd, rest): # {{{
# <nick> <005token>... :<message>
# There are a lot of 005tokens.
if not src.is_server():
raise ProtocolError('005 from non-server: %s' % src)
(nick, rest) = rest.split(' ', 1)
while not rest.startswith(':'):
            (token, rest) = rest.split(' ', 1)
            (key, val) = token.split('=', 1) if '=' in token else (token, '')
key = key.lower()
if not val and key in self._opts:
self._opts[key] = True
elif key == 'chantypes':
self._opts['chantypes'] = val
elif key == 'chanmodes':
# ban-like, arg-for-both, arg-for-set, no-arg
cmodes = val.split(',')
self._opts['chanmodes'] = [cmodes[0], cmodes[1],
cmodes[2], cmodes[3]]
elif key == 'chanlimit':
limits = val.split(',')
res = {}
for l in limits:
(key, val) = l.split(':', 1)
res[key] = val
self._opts['chanlimit'] = res
elif key == 'prefix':
# Ick. (modes)sigils.
(modes, sigils) = val.split(')', 1)
modes = modes.strip('(')
if len(modes) != len(sigils):
raise ProtocolError('len(modes) != len(sigils) in 005 PREFIX: %s' % val)
for i in range(len(modes)):
self._opts['prefixes'][sigils[i]] = modes[i]
elif key == 'maxlist':
maxes = val.split(',')
for m in maxes:
(lists, mv) = m.split(':', 1)
self._opts['maxlist'][lists] = mv
elif key == 'modes':
self._opts['modes'] = int(val)
elif key == 'network':
self._opts['network'] = val
elif key == 'callerid':
self._opts['callerid'] = val
elif key == 'nicklen':
self._opts['nicklen'] = int(val)
elif key == 'channellen':
self._opts['channellen'] = int(val)
elif key == 'topiclen':
self._opts['topiclen'] = int(val)
elif key == 'deaf':
self._opts['deaf'] = val
elif key == 'monitor':
self._opts['monitor'] = int(val)
elif key == 'targmax':
for targmax in val.split(','):
(k, v) = targmax.split(':', 1)
if not v:
                        continue
k = k.lower()
self._opts['targmax'][k] = int(v)
elif key == 'extban':
(char, types) = val.split(',')
self._opts['extban_char'] = char
self._opts['extban_types'] = types
elif key == 'clientver':
self._opts['clientver'] = float(val)
elif key == 'elist':
self._opts['elist'] = val
# }}}
def _parse_join(self, src, cmd, rest): # {{{
# <channel> [:message]
if not src.is_user():
raise ProtocolError('join from non-user source %s' % src)
        if src.nick().lower() not in self._users:
# We learned a new user!
self._users[src.nick().lower()] = src
if ' :' in rest:
(chan, msg) = rest.split(' ', 1)
else:
chan = rest
c = self._parsedest(chan)
if not c.is_channel():
raise ProtocolError('join to non-channel dest %s' % chan)
if src == self._self and chan.lower() not in self._channels:
# If we're joining, and we don't already know this channel, learn
# it.
self._channels[chan.lower()] = c
self._add_event(Event('join', src, c))
c._add_user(src)
# }}}
def _parse_chmode(self, src, dest, modes): # {{{
pass
# }}}
def _parse_mode(self, src, cmd, rest): # {{{
# <dest> <modes...>
        (dest, modes) = rest.split(' ', 1)
        dest = self._parsedest(dest)
        if not dest:
            print '!?!?'
            return
        if dest.is_channel():
self._parse_chmode(src, dest, modes)
# }}}
def _parse_privmsg(self, src, cmd, rest): # {{{
# <target> :<text...>
(dest, msg) = rest.split(' ', 1)
dest = self._parsedest(dest)
if msg[0] == ':':
msg = msg[1:]
self._add_event(Event('privmsg', src, dest, None, msg))
# }}}
def _parse_unknown(self, src, cmd, rest): # {{{
print 'Ignored: %s %s "%s"' % (src, cmd, rest)
# }}}
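    def _parse_cap(self, src, cmd, rest): # {{{
        # Minimal placeholder (an assumption, since the parser table below
        # references _parse_cap but no CAP handler exists in this file):
        # ignore the server's capability list and end negotiation so that
        # the registration started by the 'CAP LS' in __init__ can complete.
        # Subclasses that want IRCv3 capabilities should override this.
        self._add_output('CAP END')
    # }}}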
def _parse(self, line): # {{{
"""
Parses a single line of IRC input text, updating state and filling
output and event buffers as appropriate.
"""
line = line.replace('\n', '').replace('\r', '')
# Oddball case with no source at all first.
        if line.lower().startswith('ping '):
self._parsers['ping'](line)
return
# Now that we know we have a source, pull it out of the string.
(src, rest) = line.split(' ', 1)
src = self._parsesource(src)
# Next up, the command.
(cmd, rest) = rest.split(' ', 1)
cmd = cmd.lower()
# Now hand off to the command-specific parser.
if cmd in self._parsers:
self._parsers[cmd](src, cmd, rest)
else:
self._parsers['?'](src, cmd, rest)
# }}}
# Methods you might need to override to subclass this start here.
def _add_output(self, line): # {{{
"""
Adds a new line of output to the output buffer. You probably should not
call this; use the quote() function instead. If you are subclassing IRC
to provide notifications when new output is ready, override this.
"""
self._output.append(line)
# }}}
def _add_event(self, event): # {{{
"""
Adds a new event (an object with the interface of the Event class) to
the event buffer. You probably should not call this directly. If you are
subclassing IRC to provide notifications when new events are ready,
override this.
"""
self._events.append(event)
# }}}
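    # Subclassing sketch (illustrative only): push output straight to a
    # caller-provided socket instead of buffering it.
    #
    #   class SocketIRC(IRC):
    #       def __init__(self, sock, **kw):
    #           self._sock = sock            # set before IRC.__init__ emits output
    #           IRC.__init__(self, **kw)
    #       def _add_output(self, line):
    #           self._sock.sendall(line + '\r\n')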
# Public interface starts here
def __init__(self, nick=None, user='ircpy', gecos='irc.py', password=None): # {{{
"""
        Initialize an instance. If nick is not given, a random nick is
        generated; user defaults to 'ircpy'.
"""
self._parsers = {
'ping': self._parse_ping,
'001': self._parse_001,
'005': self._parse_005,
'join': self._parse_join,
'mode': self._parse_mode,
'privmsg': self._parse_privmsg,
'cap': self._parse_cap,
'?': self._parse_unknown,
}
self._opts = {
# Conservative guesses for all option values if we don't get them in
# 005.
'chantypes': '#',
'excepts': False,
'invex': False,
'chanmodes': [ 'b', 'k', 'l', 'imnpst' ],
'prefixes': { '@': 'o', '+': 'v' },
'chanlimit': { '#': 30 },
'maxlist': { 'b': 30 },
'modes': 1,
'knock': False,
'network': None,
'nicklen': 8,
'channellen': 16,
'topiclen': 300,
'etrace': False,
'cprivmsg': False,
'cnotice': False,
'deaf': None,
'monitor': None,
'fnc': False,
'targmax': {
'names': 1,
'list': 1,
'kick': 1,
'whois': 1,
'privmsg': 1,
'notice': 1,
'accept': 1,
'monitor': 1
},
'extban_char': None,
'extban_types': None,
'whox': False,
'clientver': 1.0,
'elist': None
}
self._random = random.Random()
self._output = []
self._events = []
self._users = {}
self._channels = {}
self._servers = {}
self._self = None
self._server = None
nick = nick or self._genstr('ircpy-XXXXXXXX')
if password:
            self._add_output('PASS :%s' % password)
self._add_output('CAP LS')
self._add_output('NICK %s' % nick)
self._add_output('USER %s %s "" :%s' % (user, user, gecos))
# }}}
def input(self, line): # {{{
"""
Update this IRC session's state from a new line of input. May cause new
lines of output to be generated, or new events to be generated.
"""
self._parse(line)
# }}}
def output(self): # {{{
"""
Returns all pending output for this IRC session. The returned output is
removed from the output buffer.
"""
out = self._output
self._output = []
return out
# }}}
def events(self): # {{{
"""
Returns all pending events for this IRC session. The returned events are
removed from the event buffer.
"""
evts = self._events
self._events = []
return evts
# }}}
def user(self, nick=None): # {{{
"""
Returns an IRC user by nickname, or None if no user by that nickname is
known. If no nickname is supplied, returns the user of this session
(i.e., the user as which we are connected).
"""
if not nick:
return self._self
nick = nick.lower()
if nick not in self._users:
return None
return self._users[nick]
# }}}
def users(self): # {{{
"""
Returns all known users, as a nick -> user object dictionary.
"""
return self._users
# }}}
def server(self, name=None): # {{{
"""
Returns an IRC server by name, or None if no server by that name is
known. If no name is supplied, returns the server this session is
directly connected to.
"""
if not name:
return self._server
name = name.lower()
if name not in self._servers:
return None
return self._servers[name]
# }}}
def servers(self): # {{{
"""
Returns all known servers, as a name -> server object dictionary.
"""
return self._servers
# }}}
def channel(self, name): # {{{
"""
Returns an IRC channel by name, or None if no channel by that name is
known.
"""
name = name.lower()
if name not in self._channels:
return None
return self._channels[name]
# }}}
def channels(self): # {{{
"""
Returns all known channels, as a name -> channel object dictionary.
"""
return self._channels
# }}}
# IRC protocol methods
def join(self, channel, key=None): # {{{
if key:
self._add_output('JOIN %s %s' % (channel, key))
else:
self._add_output('JOIN %s' % channel)
# }}}
def part(self, channel): # {{{
self._add_output('PART %s' % channel)
# }}}
def msg(self, target, text): # {{{
self._add_output('PRIVMSG %s :%s' % (target, text))
# }}}
if __name__ == '__main__':
irc = IRC(nick='tester')
while True:
outs = irc.output()
evts = irc.events()
for out in outs:
print 'out: %s' % out
for evt in evts:
print 'evt: %s' % evt
line = sys.stdin.readline()
if not line:
break
irc.input(line)
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from importlib import import_module
import inspect
import sys
import collections
import datetime
import time
import six
import logging
from pythonjsonlogger import jsonlogger
from werkzeug.local import Local
import json
class ThreadStateManager(Local):
def __init__(self):
super(ThreadStateManager, self).__init__()
self.stack = []
class memoized_property(object):
"""A read-only @property that is only evaluated once. Copied from
dogpile.cache (created by Mike Bayer et al)."""
def __init__(self, fget, doc=None):
self.counter = 10
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
result = self.fget(obj)
setattr(obj, self.__name__, result)
self.counter += 1
return result
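# Illustrative use of memoized_property (Config and load_settings are
# hypothetical): the wrapped method runs once per instance; its result then
# replaces the descriptor on the instance, so later accesses skip fget.
#
#   class Config(object):
#       @memoized_property
#       def settings(self):
#           return load_settings()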
def unix_epoch_time():
return int(time.mktime(datetime.datetime.now().timetuple()))
class OrderedSet(collections.MutableSet):
# The Following recipe was posted by Raymond Hettinger
# at: http://code.activestate.com/recipes/576694/
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
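# Example from the recipe: OrderedSet keeps set semantics but preserves
# insertion order.
#
#   s = OrderedSet('abracadabra')
#   list(s)          # ['a', 'b', 'r', 'c', 'd']
#   s.discard('b')
#   list(s)          # ['a', 'r', 'c', 'd']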
def caller_module(depth=1):
frm = inspect.stack()[depth + 1]
caller = inspect.getmodule(frm[0])
return caller
def caller_package(depth=1):
module = caller_module(depth + 1)
f = getattr(module, '__file__', '')
if '__init__.py' in f:
# module is a package
return module
# go up one level to get the package
package_name = module.__name__.rsplit('.', 1)[0]
return sys.modules[package_name]
# borrowed from pyramid.path.DottedNameResolver
def maybe_resolve(value, package=None):
if not isinstance(value, str):
return value
if package is None and value.startswith('.'):
package = caller_package()
module = getattr(package, '__name__', None) # package may be None
if not module:
module = None
if value == '.':
if module is None:
raise ValueError(
'relative name %r irresolveable without package' % (value,)
)
name = module.split('.')
else:
name = value.split('.')
if not name[0]:
if module is None:
raise ValueError(
'relative name %r irresolveable without '
'package' % (value,))
module = module.split('.')
name.pop(0)
while not name[0]:
module.pop()
name.pop(0)
name = module + name
used = name.pop(0)
found = __import__(used)
for n in name:
used += '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n) # pragma: no cover
return found
# from asphalt:
def resolve_reference(ref):
"""
Return the object pointed to by ``ref``.
If ``ref`` is not a string or does not contain ``:``, it is returned as is.
References must be in the form <modulename>:<varname> where <modulename> is the fully
qualified module name and varname is the path to the variable inside that module.
For example, "concurrent.futures:Future" would give you the
:class:`~concurrent.futures.Future` class.
:raises LookupError: if the reference could not be resolved
"""
if not isinstance(ref, str) or ':' not in ref:
return ref
modulename, rest = ref.split(':', 1)
try:
obj = import_module(modulename)
except ImportError as e:
value = LookupError('error resolving reference {}: could not import module'.format(ref))
six.raise_from(value, e)
return
try:
for name in rest.split('.'):
obj = getattr(obj, name)
return obj
except AttributeError:
raise LookupError('error resolving reference {}: error looking up object'.format(ref))
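# Example:
#   resolve_reference('concurrent.futures:Future')  # -> the Future class
#   resolve_reference(42)                           # -> 42, returned as-is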
def qualified_name(obj):
"""Return the qualified name (e.g. package.module.Type) for the given object."""
try:
module = obj.__module__
qualname = obj.__qualname__ if six.PY3 else obj.__name__
except AttributeError:
type_ = type(obj)
module = type_.__module__
qualname = type_.__qualname__ if six.PY3 else type_.__name__
return qualname if module in ('typing', 'builtins') else '{}.{}'.format(module, qualname)
def init_logger():
logger = logging.getLogger('yosaipy2')
formatter = jsonlogger.JsonFormatter()
for h in logger.handlers:
h.setFormatter(formatter)
def get_logger():
return logging.getLogger('yosaipy2')
def json_load(file_handle):
return _byteify(
json.load(file_handle, object_hook=_byteify),
ignore_dicts=True
)
def json_loads(json_text):
return _byteify(
json.loads(json_text, object_hook=_byteify),
ignore_dicts=True
)
def _byteify(data, ignore_dicts=False):
if isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
return data
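# Example (Python 2): json_loads('{"a": "b"}') returns {'a': 'b'} with UTF-8
# byte strings instead of the unicode strings the stdlib json module yields.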
|
|
#!/usr/bin/env python
"""
------------------------------------------------------------------------
Routine to translate X and Y pixel coordinates to RA, DEC values in
multiprocessing mode.
Usage: python pix2sky_multi.py [options] image pixfile
Input:
image: Input image with basic header keywords (CRVAL1, CRVAL2, CRPIX1, CRPIX2)
pixfile: Input file with (x, y) pixel value in column 1 and 2
[Options]:
--help: help
--version: program version
--verbose: show result messages
--quiet: don't show messages
--filename: output file name (default is pix2sky.dat)
--degree: (ra, dec) in degrees? (default is yes) else in hh:mm:ss/degree:arcmin:arcsec format
--mode: multiprocessing mode [pool, process]; default is process
--ncpus: Number of processors to use [default is maximum number of cores on the machine]
--scheduler: Type of scheduler [static, guided, dynamic]; default is guided
Output:
pix2sky.dat: Output file with (x, y) and corresponding (ra, dec)
Author:
Navtej Singh
Organization:
Centre for Astronomy, National University of Ireland, Galway, Ireland
Version:
10 January 2012 1.0 Original version
------------------------------------------------------------------------
"""
# Load python modules to be used in the routine
import sys, math
from os.path import getsize, join, exists
from StringIO import StringIO
from optparse import OptionParser
# Check if multiprocesssing module is present
try:
import multiprocessing as mp
from multiprocessing import Process, Queue
except:
print >> sys.stderr, 'Error: Python multiprocessing module not found. Use serial version of this routine. Exiting.'
sys.exit(-1)
# Create chunks of data to be distributed to multiple processors/cores
# based on the file size. Arbitrary factor of 20 was chosen.
# ====================================================================
def getchunks(infile, n_cpus, scheduler = 'guided'):
# Divide input data based on scheduler type
if scheduler == 'static':
size = getsize(infile) / n_cpus
else:
size = getsize(infile) / (n_cpus * 20)
# Open input file
try:
ifile = open(infile)
except:
print >> sys.stderr, 'Error: Not able to open ', infile, '. Exiting.'
sys.exit(-1)
# Create chunk of data to be distributed to nodes
while 1:
start = ifile.tell()
ifile.seek(size, 1)
s = ifile.readline()
yield start, ifile.tell() - start
if not s:
break
# Close the input file
try:
ifile.close()
except:
print >> sys.stderr, 'Warning: Error closing the file ', ifile
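# Example of the chunk sizing above: for a 2 MB input file, 4 cores and the
# default 'guided' scheduler, each chunk is roughly 2 MB / (4 * 20) = 25 kB,
# extended to the next newline, and is yielded as a (start, length) pair.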
# Get header keywords of the input image and save in class variables
# ==================================================================
def getHeader(image):
print >> sys.stdout, '\n Getting image header keywords...'
# Input can be a single FITS image, multi-extension image or
# multi-extension image with particular extension
if len(image.split( '[', 1 )) > 1:
ext = image.split('[', 1 )[1].replace(']', '')
image = image.split('[', 1)[0]
else:
ext = ''
# Open Header Unit List (HDU) to read header keywords
try:
hdulist = pyfits.open(image)
except:
print >> sys.stderr, 'Error: Not able to read FITS header. Exiting.'
sys.exit(-1)
# Get header parameters - checking number of extensions and using 1st extension
# in case of multi extension FITS image
if len(hdulist) > 1:
if ext == '':
hdrdata = hdulist[1].header
else:
hdrdata = hdulist[int(ext)].header
else:
hdrdata = hdulist[0].header
# Get CRPIX keyword values
crpix1 = hdrdata['CRPIX1']
crpix2 = hdrdata['CRPIX2']
# Get CRVAL keyword values
ra0 = hdrdata['CRVAL1']
dec0 = hdrdata['CRVAL2']
# Get CD keyword values
cd11 = hdrdata['CD1_1']
cd12 = hdrdata['CD1_2']
cd21 = hdrdata['CD2_1']
cd22 = hdrdata['CD2_2']
    # Close the HDU list and return values to the calling program
    hdulist.close()
    return ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22
# Convert RA from degrees to hour:min:sec
# =======================================
def degree2hours(degrees):
hour = int(degrees)
tmp = (degrees - hour) * 60
min = int(tmp)
sec = (tmp - min) * 60
return '%2d%s%02d%s%2.4f' %(hour, ':', min, ':', sec)
# Translate X,Y pixels to sky coordinates (RA, DEC)
# =================================================
def translate(x, y, ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, degree):
# Formulas based on IRAF implementation of xy2rd task
xi = cd11 * (x - crpix1) + cd12 * (y - crpix2)
eta = cd21 * (x - crpix1) + cd22 * (y - crpix2)
xi = math.radians(xi)
eta = math.radians(eta)
ra0 = math.radians(ra0)
dec0 = math.radians(dec0)
ra = math.atan2(xi, math.cos(dec0) - eta * math.sin(dec0)) + ra0
dec = math.atan2(eta * math.cos(dec0) + math.sin(dec0), math.sqrt((math.cos(dec0) - eta * math.sin(dec0))**2 + xi**2))
ra = math.degrees(ra)
dec = math.degrees(dec)
ra = ra % 360.0
if ra < 0.0:
ra = ra + 360.0
if degree == 'no':
ra = ra / 15.0
ra = degree2hours(ra)
dec = degree2hours(dec)
print >> sys.stdout, 'X = %6.3f%s' %(x, '\t'), ' Y = %6.3f' %y, ' RA = %s' %ra, ' DEC = %s' %dec
else:
print >> sys.stdout, 'X = %6.3f%s' %(x, '\t'), ' Y = %6.3f' %y, ' RA = %3.9f' %ra, ' DEC = %3.9f' %dec
return (x, y, ra, dec)
# Multicore process worker function
# =================================
def process_worker( s_q, r_q, ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, hms ):
# Iterate through send queue, getting data to process, processing it
# and putting in the receive queue
for value in iter(s_q.get, 'STOP'):
# Open the input file
try:
ifile = open(value[0], 'r')
except:
print >> sys.stderr, 'Error: Not able to open input file ', value[0], '. Exiting.'
sys.exit(-1)
# Translate data from pixel to sky coordinates
ifile.seek(value[1])
result = []
for line in ifile.read(value[2]).splitlines():
            if line.strip() and line[0] != '#':
result.append(translate(float(line.split()[0]), float(line.split()[1]), ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, hms))
# Put result in the receive queue
r_q.put(result)
# Multicore pool worker function
# ==============================
def pool_worker(indata):
# Unpack the input python list
chunk0, chunk1, infile, ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, hms = indata
# Open the input file
try:
ifile = open(infile, 'r')
except:
print >> sys.stderr, 'Error: Not able to open input file ', infile, '. Exiting.'
sys.exit(-1)
# Translate data from pixel to sky coordinates
ifile.seek(chunk0)
result = []
for line in ifile.read(chunk1).splitlines():
        if line.strip() and line[0] != '#':
result.append(translate(float(line.split()[0]), float(line.split()[1]), ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, hms))
# Return result back to the calling program
return result
# pix2sky routine to process x,y pixel pairs
# ==========================================
def pix2sky(image, infile, degree = 'yes', outfile = None, mode = 'process', ncpus = None, scheduler = 'guided'):
# Read image headers
ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22 = getHeader(image)
# Get number of processors (cores) on the machine. In case of Intel processors with
# hyperthreading, total number of processors will be equal to number of cores * number of threads/core
if ncpus:
n_cpus = int(ncpus)
else:
n_cpus = mp.cpu_count()
# Divide data in smaller chunks for better performance based on scheduler type
chunks = getchunks(infile, n_cpus, scheduler)
# Based on multiprocessing mode, follow seperate path
if mode == 'process':
send_q = Queue()
recv_q = Queue()
# Start number of processes equal to number of cores
for i in range(n_cpus):
Process(target = process_worker, args = (send_q, recv_q, ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, degree)).start()
# Put data in the send queue
cnt = 0
for chunk in chunks:
cnt += 1
send_q.put((infile, chunk[0], chunk[1]))
# Set the output file name
if not outfile:
if len(infile.rsplit('/')) > 1:
                outfile = join(infile.rsplit('/', 1)[0], 'pix2sky.out')
else:
outfile = 'pix2sky.out'
# Open the output file to write the results
try:
ofile = open(outfile, 'w')
except:
print >> sys.stderr, 'Error: Not able to open outfile file ', outfile, '. Exiting.'
sys.exit(-1)
# Format and write records to output file
ofile.write('# ---------------------------------------------------------\n')
ofile.write('# X Y RA DEC \n')
ofile.write('# ---------------------------------------------------------\n')
# Write results to the output file
for i in range(cnt):
res = recv_q.get()
for value in res:
ofile.write('%10s%10s%18s%18s%s' %(str(value[0]), str(value[1]), str(value[2]), str(value[3]), '\n'))
# Close the output file
try:
ofile.close()
except:
print >> sys.stderr, 'Warning: Not able to close output file ', outfile
# Stop all the running processes
for i in range(n_cpus):
send_q.put('STOP')
print >> sys.stdout, '\n Results written to - ', outfile
else:
# Create input python list to be sent to process worker method
indata = []
for chunk in chunks:
indata.append((chunk[0], chunk[1], infile, ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, degree))
# Set the output file name
if not outfile:
if len(infile.rsplit('/')) > 1:
                outfile = join(infile.rsplit('/', 1)[0], 'pix2sky.out')
else:
outfile = 'pix2sky.out'
# Open the output file
try:
ofile = open(outfile, 'w')
except:
print >> sys.stderr, 'Error: Not able to open the outfile file ', outfile, '. Exiting.'
sys.exit(-1)
# Write the headers to the output file
ofile.write('# ---------------------------------------------------------\n')
ofile.write('# X Y RA DEC \n')
ofile.write('# (pixel) (pixel) (degree) (degree)\n')
ofile.write('# ---------------------------------------------------------\n')
# To prevent memory overflow, process the input record and write output in chunks
for i in xrange(0, len(indata), n_cpus):
tmpdata = indata[ i : i + n_cpus ]
            pool = mp.Pool(n_cpus)
            result = pool.map(pool_worker, tmpdata)
            pool.close()
            pool.join()
for res in result:
for value in res:
ofile.write('%10s%10s%18s%18s%s' %(str(value[0]), str(value[1]), str(value[2]), str(value[3]), '\n'))
# Close the output file
try:
ofile.close()
except:
print >> sys.stderr, 'Warning: Not able to close the output file ', outfile
print >> sys.stdout, '\n Results written to file - ', outfile
# Main function - doing some data validation before calling pix2sky method
# ========================================================================
def main(image, pixelfile, degree = 'yes', outfile = None, mode = 'process', ncpus = None, scheduler = 'guided'):
# Check if the image exists
if not exists(image.split( '[', 1 )[0]):
print >> sys.stderr, 'Error: Image ', image, ' does not exist. Exiting.'
sys.exit(-1)
# Check if the input pixel file exists
if not exists(pixelfile):
print >> sys.stderr, 'Error: Pixel file ', pixelfile, ' does not exist. Exiting.'
sys.exit(-1)
# Execute the method
pix2sky(image, pixelfile, degree, outfile, mode, ncpus, scheduler)
# Entry point for PIX2SKY utility
# ===============================
if __name__ == '__main__':
usage = "Usage: %prog [options] image pixfile"
description = "Description. Utility to convert X/Y pixel image coordinates to RA/DEC sky coordinates in multiprocessing mode."
parser = OptionParser(usage = usage, version = "%prog 1.0", description = description)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default = False,
help = "print result messages to stdout"
)
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default = True,
help = "don't print result messages to stdout"
)
parser.add_option("-d", "--degree", dest = "degree", metavar="DEGREE",
action="store", help = "ra/dec in degree? [default is yes]",
choices=['yes', 'no'], default = 'yes'
)
parser.add_option("-f", "--filename", dest = "filename",
action='store', metavar="FILE", help = "output file name [default is pix2sky.out]"
)
parser.add_option("-m", "--mode", dest = "mode", metavar="MODE",
action="store", help = "multiprocessing mode (pool or process) [default is process]",
choices=['process', 'pool'], default = 'process'
)
parser.add_option("-n", "--ncpus", dest = "ncpus", metavar="NCPUS",
action="store", help = "number of cpus (cores) for processing [default is maximum cores on the machine]"
)
parser.add_option("-s", "--scheduler", dest = "scheduler", metavar="SCHEDULER",
action="store", help = "scheduler for multiprocessing [default is guided]",
choices=['guided', 'static'], default = 'guided'
)
(options, args) = parser.parse_args()
# Check for number of input arguments
if len( args ) != 2:
parser.error("Incorrect number of arguments")
print >> sys.stdout, '\n Starting processing...'
# Check verbosity
if not options.verbose:
output = StringIO()
old_stdout = sys.stdout
sys.stdout = output
# Check if pyfits module is available
try:
import pyfits
except:
print >> sys.stderr, 'Error: Python module pyfits not found. Exiting.'
exit(-1)
main(args[0], args[1], options.degree, options.filename, options.mode, options.ncpus, options.scheduler)
# Reset verbosity
if not options.verbose:
sys.stdout = old_stdout
print >> sys.stdout, '\n Process completed successfully.'
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyStateDelete class that deletes virtual network topology and its state.
#
from __future__ import absolute_import
import json
import os
import sys
import subprocess
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
import happy.HappyNodeDelete
import happy.HappyNetworkDelete
import happy.HappyLinkDelete
import happy.HappyNode
import happy.HappyInternet
from happy.HappyHost import HappyHost
from six.moves import input
options = {}
options["quiet"] = False
options["force"] = False
options["all"] = False
def option():
return options.copy()
class HappyStateDelete(HappyHost):
"""
    Deletes the current network topology state. This only deletes nodes, networks, and
    links found in the current state file.
happy-state-delete [-h --help] [-q --quiet] [-f --force] [-a --all]
-f --force Optional. Turns off all deletion confirmations. WARNING: We do not
recommend using this option, as it could delete critical non-Happy host
network resources.
-a --all Optional. Deletes all network namespace and links on the host system.
Asks for confirmation before deleting non-Happy network resources.
Examples:
$ happy-state-delete
Preferred usage. Deletes the current state.
$ happy-state-delete -a
Use only if there's a networking issue that can't be resolved with
happy-state-delete alone. Respond to deletion requests with "no" if you are
unsure whether the network resource should be deleted.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyHost.__init__(self)
self.quiet = opts["quiet"]
self.force = opts["force"]
self.all = opts["all"]
def __pre_check(self):
lock_manager = self.getStateLockManager()
lock_manager.break_lock()
def __post_check(self):
emsg = "Delete Happy state completed."
self.logger.debug("[localhost] HappyStateDelete: %s" % (emsg))
def __delete_state(self):
for node_id in self.getNodeIds():
options = happy.HappyNodeDelete.option()
options["quiet"] = self.quiet
options["node_id"] = node_id
cmd = happy.HappyNodeDelete.HappyNodeDelete(options)
ret = cmd.run()
self.readState()
for network_id in self.getNetworkIds():
options = happy.HappyNetworkDelete.option()
options["quiet"] = self.quiet
options["network_id"] = network_id
cmd = happy.HappyNetworkDelete.HappyNetworkDelete(options)
ret = cmd.run()
self.readState()
for link_id in self.getLinkIds():
options = happy.HappyLinkDelete.option()
options["quiet"] = self.quiet
options["link_id"] = link_id
cmd = happy.HappyLinkDelete.HappyLinkDelete(options)
ret = cmd.run()
self.readState()
def __delete_state_file(self):
if os.path.isfile(self.state_file):
os.remove(self.state_file)
def __delete_host_netns(self):
for node_id in self.getHostNamespaces():
delete_it = False
if self.force is True:
delete_it = True
else:
reply = str(input("Delete host namespace " + node_id + " (y/N): ")).lower().strip()
                if len(reply) > 0 and reply[0] == 'y':
delete_it = True
if delete_it:
options = happy.HappyNodeDelete.option()
options["quiet"] = self.quiet
options["node_id"] = node_id
cmd = happy.HappyNodeDelete.HappyNodeDelete(options)
ret = cmd.run()
else:
emsg = "Leaving host namespace %s as it is." % (node_id)
self.logger.error("[localhost] HappyStateDelete: %s" % (emsg))
self.readState()
def __delete_host_bridges(self):
for network_id in self.getHostBridges():
delete_it = False
if self.force is True:
delete_it = True
else:
reply = str(input("Delete host bridge " + network_id + " (y/N): ")).lower().strip()
                if len(reply) > 0 and reply[0] == 'y':
delete_it = True
if delete_it:
options = happy.HappyNetworkDelete.option()
options["quiet"] = self.quiet
options["network_id"] = network_id
cmd = happy.HappyNetworkDelete.HappyNetworkDelete(options)
ret = cmd.run()
else:
emsg = "Leaving host bridge %s as it is." % (network_id)
self.logger.error("[localhost] HappyStateDelete: %s" % (emsg))
self.readState()
def __delete_host_links(self):
for link_id in self.getHostInterfaces():
delete_it = False
if self.force is True:
delete_it = True
else:
reply = str(input("Delete host link " + link_id + " (y/N): ")).lower().strip()
if len(reply) > 0 and reply[0] == 'y':
delete_it = True
if delete_it:
options = happy.HappyLinkDelete.option()
options["quiet"] = self.quiet
options["link_id"] = link_id
cmd = happy.HappyLinkDelete.HappyLinkDelete(options)
ret = cmd.run()
else:
emsg = "Leaving host link %s as it is." % (link_id)
self.logger.error("[localhost] HappyStateDelete: %s" % (emsg))
self.readState()
def __cleanup_host(self):
self.__delete_host_netns()
self.__delete_host_bridges()
self.__delete_host_links()
def __delete_internet(self):
"""
delete internet isp interface
functionality similar to command: happy-internet -d ...
"""
#get global config before it is deleted
self.global_config = self.getGlobal()
if "internet" in self.global_config:
internet_config = self.global_config["internet"]
for internet_value in internet_config.values():
options = happy.HappyInternet.option()
options["delete"] = True
options["iface"] = internet_value["iface"]
options["isp"] = internet_value["isp"]
options["seed"] = internet_value["isp_addr"].split(".")[2]
options["node_id"] = internet_value["node_id"]
hi = happy.HappyInternet.HappyInternet(options)
hi.start()
def run(self):
self.__pre_check()
self.__delete_internet()
self.__delete_state()
self.__delete_state_file()
if self.all:
self.__cleanup_host()
self.__post_check()
return ReturnMsg(0)
|
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from mixbox.binding_utils import *
from . import cybox_common
from . import gui_object
class GUIDialogboxObjectType(gui_object.GUIObjectType):
"""The GUIDialogboxObjectType type is intended to characterize GUI
dialog boxes."""
subclass = None
superclass = gui_object.GUIObjectType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Height=None, Width=None, Box_Caption=None, Box_Text=None):
super(GUIDialogboxObjectType, self).__init__(object_reference, Custom_Properties, Height, Width, )
self.Box_Caption = Box_Caption
self.Box_Text = Box_Text
def factory(*args_, **kwargs_):
if GUIDialogboxObjectType.subclass:
return GUIDialogboxObjectType.subclass(*args_, **kwargs_)
else:
return GUIDialogboxObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Box_Caption(self): return self.Box_Caption
def set_Box_Caption(self, Box_Caption): self.Box_Caption = Box_Caption
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Box_Text(self): return self.Box_Text
def set_Box_Text(self, Box_Text): self.Box_Text = Box_Text
def hasContent_(self):
if (
self.Box_Caption is not None or
self.Box_Text is not None or
super(GUIDialogboxObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='GUIDialogBoxObj:', name_='GUIDialogboxObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='GUIDialogboxObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='GUIDialogBoxObj:', name_='GUIDialogboxObjectType'):
super(GUIDialogboxObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='GUIDialogboxObjectType')
def exportChildren(self, lwrite, level, namespace_='GUIDialogBoxObj:', name_='GUIDialogboxObjectType', fromsubclass_=False, pretty_print=True):
super(GUIDialogboxObjectType, self).exportChildren(lwrite, level, 'GUIDialogBoxObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Box_Caption is not None:
self.Box_Caption.export(lwrite, level, 'GUIDialogBoxObj:', name_='Box_Caption', pretty_print=pretty_print)
if self.Box_Text is not None:
self.Box_Text.export(lwrite, level, 'GUIDialogBoxObj:', name_='Box_Text', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(GUIDialogboxObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Box_Caption':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Box_Caption(obj_)
elif nodeName_ == 'Box_Text':
obj_ = cybox_common.StringObjectPropertyType.factory()
obj_.build(child_)
self.set_Box_Text(obj_)
super(GUIDialogboxObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class GUIDialogboxObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'Time': cybox_common.TimeType,
'Width': cybox_common.IntegerObjectPropertyType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': cybox_common.ControlledVocabularyStringType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Tool': cybox_common.ToolInformationType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'GUI_Object': gui_object.GUIObjectType,
'Language': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Strings': cybox_common.ExtractedStringsType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Reference_Description': cybox_common.StructuredTextType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Box_Caption': cybox_common.StringObjectPropertyType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Error': cybox_common.ErrorType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Contributors': cybox_common.PersonnelType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'Height': cybox_common.IntegerObjectPropertyType,
'References': cybox_common.ToolReferencesType,
'Internal_Strings': cybox_common.InternalStringsType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Libraries': cybox_common.LibrariesType,
'Box_Text': cybox_common.StringObjectPropertyType,
'Function': cybox_common.StringObjectPropertyType,
'Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Compiler': cybox_common.CompilerType,
'Name': cybox_common.StringObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Dependency_Description': cybox_common.StructuredTextType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Data_Size': cybox_common.DataSizeType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GUI_Dialogbox'
rootClass = GUIDialogboxObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GUI_Dialogbox'
rootClass = GUIDialogboxObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from mixbox.vendor.six import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GUI_Dialogbox'
rootClass = GUIDialogboxObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_="GUI_Dialogbox",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"GUIDialogboxObjectType"
]
|
|
import os
import sys
import stat
import fnmatch
import threading
import traceback
from queue import Queue
from threading import Thread, Event
from collections import defaultdict
from functools import wraps, partial, reduce
from itertools import filterfalse, chain
import sublime
# The primary DB is indexed on the fly by cscope
# and can therefore only contain a limited number of files
# before the indexing time becomes noticeable. For projects of
# size up to TWO_TIER_THRESHOLD we keep all the files in the primary DB.
PRIMARY_DB = 'primary'
# For projects larger than TWO_TIER_THRESHOLD, we use a two tier solution
# instead. The primary DB is still indexed on the fly but now only contains
# files that are open in the editor and have been modified since the last
# indexing run of the secondary DB. The secondary DB will then contain all
# the files in the project, but will be indexed less frequently so it will
# most likely be out of date for the files currently being modified. That is OK since
# the primary DB will hold up to date information for those files.
SECONDARY_DB = 'secondary'
from ..SublimeCscope import DEBUG, PACKAGE_NAME
from . import settings
from . import cscope_runner
DEBUG_DECORATORS = False
DEBUG_INDEXERCONFIG = False
DB_FOLDER_POSTFIX = '-' + PACKAGE_NAME.lower()
TWO_TIER_THRESHOLD = 50
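# Illustrative sketch (an assumed helper, not used by the plugin): which cscope
# file lists an index update maintains for a given project size. Above the
# threshold the secondary list holds every project file while the primary list
# only holds open, recently modified files; below it the primary list holds the
# whole project.
def _example_file_lists(file_count):
    if file_count > TWO_TIER_THRESHOLD:
        return [PRIMARY_DB + '.files', SECONDARY_DB + '.files']
    return [PRIMARY_DB + '.files']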
# The global dict of indexers
# There should be one per project or workspace
_indexers = {}
_indexers_by_win = {}
class ActorQuit(Exception):
pass
class UnhandledMessageException(Exception):
pass
class ActorCommandMsg():
def __init__(self, action, wait_for_result=False, result_callback=None):
self._action = action
self._result = Queue() if wait_for_result else None
self._result_callback = result_callback
def _set_result(self, result):
if self._result:
self._result.put(result)
elif isinstance(result, Exception):
raise result
elif self._result_callback:
self._result_callback(result)
def result(self):
if self._result:
res = self._result.get()
if isinstance(res, Exception):
raise res
return res
else:
return None
def run(self):
try:
res = self._action()
except Exception as e:
res = e
finally:
self._set_result(res)
# Decorator that hides the details of sending messages to Actors
def send_msg(func):
@wraps(func)
def wrapper(self, *args, **kwds):
result_cb = None
is_sync = False
send_always = False
#make sure the Actor is started
self.start()
if not self._is_started():
raise AssertionError("Actor %s is not running" % self.__class__)
is_external = bool(self._thread_id and self._thread_id != threading.get_ident())
#strip away any arguments aimed for the decorator
if kwds:
result_cb = kwds.pop('result_callback', None)
is_sync = kwds.pop('wait_for_result', False)
send_always = kwds.pop('send_always', False)
#deadly combo, that will cause a deadlock in the actor
if send_always and is_sync and not is_external:
raise AssertionError("You can't send a message to yourself and wait for the result!")
if send_always or is_external:
action = lambda: func(self, *args, **kwds)
msg = ActorCommandMsg(action, wait_for_result=is_sync, result_callback=result_cb)
if DEBUG_DECORATORS:
print("Sending %s msg: %s" % ('sync' if is_sync else 'async', func.__name__))
self.send(msg)
return msg.result()
if DEBUG_DECORATORS: print("Calling %s directly" % func.__name__)
return func(self, *args, **kwds)
return wrapper
class ActorBase:
def __init__(self):
self._mailbox = Queue()
self._started = Event()
self._terminated = Event()
self._thread_id = 0
self.recv_count = 0
def send(self, msg):
self._mailbox.put(msg)
def recv(self):
msg = self._mailbox.get()
self.recv_count += 1
if msg is ActorQuit:
raise ActorQuit()
return msg
def _close(self):
self.send(ActorQuit)
def _join(self):
self._terminated.wait()
def _bootstrap(self):
try:
self._thread_id = threading.get_ident()
self._started.set()
self._run()
except ActorQuit:
pass
finally:
self._thread_id = 0
self._started.clear()
self._terminated.set()
def _run(self):
while True:
msg = self.recv()
if isinstance(msg, ActorCommandMsg):
msg.run()
else:
self.handle_message(msg)
def _is_started(self):
return self._started.is_set() and not self._terminated.is_set()
def handle_message(self, msg):
raise UnhandledMessageException(msg)
def quit(self):
self._close()
self._join()
def start(self):
if self._is_started():
return
self._terminated.clear()
t = Thread(target=self._bootstrap)
t.daemon = True
t.start()
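# Minimal usage sketch (illustrative only; this class is an assumption, not part
# of the plugin): methods decorated with @send_msg run on the actor's own
# thread. Callers on other threads either block with wait_for_result=True or
# pass result_callback=<fn> to be notified asynchronously.
class _ExampleActor(ActorBase):
    @send_msg
    def add(self, a, b):
        # Executes inside the actor thread when called from another thread.
        return a + b
# e.g.:
#   actor = _ExampleActor()
#   actor.start()
#   total = actor.add(1, 2, wait_for_result=True)   # -> 3
#   actor.quit()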
class Indexer(ActorBase):
""" The indexers maintains the cscope indexes
The Indexer is responsible for maintaining an up-to-date
cscope index of the project it is associated with.
"""
def __init__(self):
super().__init__()
self._crawler = Crawler()
self._crawl_in_progress = False
self._partial_crawl_queue = []
self._index_timestamp = None
self._two_tier_mode = False
self._file_index = {}
self._promotion_set = set()
self._demotion_set = set()
self._config = None
self._force_rebuild_db = False
def start(self):
super().start()
self._crawler.start()
def quit(self):
self._crawler.quit()
super().quit()
def _reset_results(self):
self._two_tier_mode = False
self._partial_crawl_queue.clear()
self._file_index.clear()
self._promotion_set.clear()
self._demotion_set.clear()
def _count_files(self, file_index):
return reduce(lambda tot, i: tot + len(i['files']), file_index.values(), 0)
def _write_file_list(self, files, file_name):
# Only try to create our own folder
if not os.path.exists(os.path.dirname(file_name)):
os.mkdir(os.path.dirname(file_name))
with open(file_name, mode='wt', encoding='utf-8') as file_list:
flist = ['"' + f + '"' if ' ' in f else f for f in files]
flist.append('\n')
file_list.write('\n'.join(flist))
def _gen_index(self, full_update=True):
success = False
try:
primary_list = os.path.join(self._config.db_location, PRIMARY_DB + '.files')
secondary_list = os.path.join(self._config.db_location, SECONDARY_DB + '.files')
#generate the file list
files = []
for v in self._file_index.values():
if v['files']:
files.extend(map(lambda f: os.path.join(v['path'], f), v['files']))
if self._two_tier_mode:
if self._promotion_set:
self._write_file_list(self._promotion_set, primary_list)
elif os.path.exists(primary_list):
os.remove(primary_list)
if full_update:
self._write_file_list(files, secondary_list)
cscope_runner.generate_index(self._config.db_location,
_find_window_from_indexer(self),
force_rebuild=self._force_rebuild_db)
self._force_rebuild_db = False
else:
self._write_file_list(files, primary_list)
if os.path.exists(secondary_list):
os.remove(secondary_list)
success = True
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print("%s: Generating index for project: %s caused an exception")
print(''.join('!! ' + line for line in lines))
return success
@send_msg
def _perform_crawl(self, partial_crawl=False):
start_path = None
if not self._config or not self._config.is_complete:
return
if self._crawl_in_progress:
print("Project: '%s' refresh is already in progress" % self._config.db_location)
return
elif partial_crawl:
#try to find a starting point that includes all paths in
#self._partial_crawl_queue
start_path = os.path.commonprefix(self._partial_crawl_queue)
if start_path.endswith(os.path.sep):
start_path = start_path[:-1]
if start_path and not os.path.exists(start_path):
start_path = os.path.dirname(start_path)
base_path, _ = self._config.find_base_path(start_path)
if start_path and not base_path:
start_path = None
if DEBUG:
if start_path:
print("Performing partial refresh starting from %s for project: %s" %
(start_path, self._config.db_location))
else:
print("Performing full refresh for project: %s" % self._config.db_location)
self._partial_crawl_queue.clear()
self._crawl_in_progress = True
self._crawler.crawl(self._config,
user_data=start_path,
start_path=start_path,
result_callback=self._crawl_result_ready)
@send_msg
def _crawl_result_ready(self, result):
self._crawl_in_progress = False
crawl_res, partial_update = result
if DEBUG:
print("Crawl results received. Found %d files" % self._count_files(crawl_res))
if self._count_files(crawl_res) > TWO_TIER_THRESHOLD:
if not self._two_tier_mode:
if partial_update:
print("%s: A partial update of project: %s resulted in threshold exceeded. "
"Performing full update." %
(PACKAGE_NAME, os.path.dirname(self._config.db_location)))
self._perform_crawl()
return
else:
if DEBUG: print("Threshold exceeded, switching to two tier mode")
self._reset_results()
self._two_tier_mode = True
elif not partial_update and self._two_tier_mode:
if DEBUG: print("%s: Project: %s. Project size is below threshold. "
"Reverting back to one tier mode" %
(PACKAGE_NAME, os.path.dirname(self._config.db_location)))
self._reset_results()
file_index = {}
if partial_update:
# Extract the relevant subset to compare
            for k, v in list(self._file_index.items()):
                if v['path'].startswith(partial_update):
                    file_index[k] = v
del self._file_index[k]
else:
file_index = self._file_index
self._file_index = {}
self._partial_crawl_queue.clear()
partial_update = ''
if (file_index != crawl_res):
if DEBUG:
print("Crawl of project: %s contained changes." %
os.path.dirname(self._config.db_location))
self._file_index.update(crawl_res)
if self._gen_index():
#remove files from the demotion list
tmp = {f for f in self._demotion_set if f.startswith(partial_update)}
self._demotion_set -= tmp
self._promotion_set -= tmp
        # Perform any pending partial crawls
if self._partial_crawl_queue:
self._perform_crawl(partial_crawl=True, send_always=True)
@send_msg
def refresh(self):
self._force_rebuild_db = True
self._perform_crawl()
@send_msg
def set_config(self, config):
if config and config != self._config:
if DEBUG: print("New config received. Refreshing project %s" % config.db_location)
self._config = config
self.refresh()
@send_msg
def promote_buffer(self, file_path):
if file_path in self._promotion_set:
return
base, name = os.path.split(file_path)
st = os.stat(base)
if st.st_ino in self._file_index:
# in case the folder exists in the index under a different name
# use that name instead
base = self._file_index[st.st_ino]['path']
file_path = os.path.join(base, name)
if file_path in self._promotion_set:
return
if not self._config.file_matches(base, name):
return
if DEBUG: print("Promoting: %s" % file_path)
if self._two_tier_mode:
self._promotion_set.add(os.path.join(base, name))
self._gen_index(full_update=False)
elif not name in self._file_index.get(st.st_ino, {}).get('files',[]):
# file not found in index
self._perform_crawl()
@send_msg
def demote_buffer(self, file_path):
if file_path not in self._promotion_set:
return
if file_path in self._demotion_set:
return
if DEBUG: print("Demoting: %s" % file_path)
self._demotion_set.add(file_path)
self._partial_crawl_queue.append(os.path.dirname(file_path))
self._perform_crawl(True, send_always=True)
class Crawler(ActorBase):
""" The Crawler scans the project folders for files to index. """
@send_msg
def crawl(self, config, user_data, start_path=None):
result = defaultdict(dict)
if start_path:
base_path, follow_syms = config.find_base_path(start_path)
folders_to_search = [(start_path, base_path, follow_syms)]
else:
folders_to_search = [(base_path, base_path, follow_syms) for
base_path, follow_syms in config.base_paths()]
for start, base, follow_syms in folders_to_search:
os_walk = partial(os.walk, followlinks=follow_syms)
os_stat = partial(os.stat, follow_symlinks=follow_syms)
file_matcher = partial(config.file_matches, base_path=base)
folder_matcher = partial(config.folder_matches, base_path=base)
visited_files = set()
self._crawl_one_subfolder(start, result,
os_walk, os_stat,
file_matcher, folder_matcher,
visited_files)
return (result, user_data)
def _crawl_one_subfolder(self, start_path, result, os_walk,
os_stat, file_matcher,
folder_matcher, visited_files):
start_path = os.path.normpath(start_path)
if DEBUG: print("Starting to crawl folder: %s" % start_path)
prev = None
prev_inode = 0
for current, subdirs, files in os_walk(start_path):
inode = prev_inode
if current != prev:
prev = current
inode = os_stat(current).st_ino
if inode in result:
AssertionError("Inode %d already seen. path: %s == %s" %
(inode, current, result[inode]['path']))
result[inode]['path'] = current
result[inode]['magic'] = 0
result[inode]['files'] = []
self._process_files(current, files, result[inode],
os_stat, file_matcher, visited_files)
self._process_subfolders(current, subdirs, os_stat,
folder_matcher, result.keys())
def _process_files(self, path, files, result,
os_stat, file_matcher, visited_files):
for f in files:
try:
st = os_stat(os.path.join(path, f))
except (FileNotFoundError, OSError) as e:
print("%s: %s" % (PACKAGE_NAME, e))
continue
if st.st_ino in visited_files:
if DEBUG: print("File %s was already visited" % os.path.join(path, f))
continue
if file_matcher(path, f, st.st_mode):
result['files'].append(f)
result['magic'] += st.st_size + st.st_mtime
visited_files.add(st.st_ino)
def _process_subfolders(self, path, subdirs, os_stat,
folder_matcher, visited_folders):
filtered_subdirs = []
for d in subdirs:
try:
st = os_stat(os.path.join(path, d))
except (FileNotFoundError, OSError) as e:
print("%s: %s" % (PACKAGE_NAME, e))
continue
if st.st_ino in visited_folders:
if DEBUG: print("File %s was already visited" % os.path.join(path, d))
continue
if folder_matcher(path, d, st.st_mode):
filtered_subdirs.append(d)
subdirs.clear()
subdirs.extend(filtered_subdirs)
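        # Note: pruning happens by mutating `subdirs` in place (clear + extend)
        # so that os.walk skips excluded folders on later iterations; rebinding
        # the name to a new list would not affect the walk.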
class IndexerConfig():
def __init__(self, window):
self._is_complete = False
self._file_exts = None
self._db_location = get_db_location(window)
if not self._db_location:
return
self._file_exts = _set_from_sorted_list(settings.get('index_file_extensions', window))
if not self._file_exts:
print("%s: The list of file extensions to index was empty. \
Please check your settings." % PACKAGE_NAME)
return
self._search_std_incl_folders = settings.get('search_std_include_folders', window)
self._std_incl_folders = _set_from_sorted_list(settings.get('std_include_folders', window))
self._folder_configs = {}
self._index_blacklist = set()
global_folder_exclude = []
global_folder_include = []
global_file_exclude = []
global_file_include = []
if window.active_view():
s = window.active_view().settings()
self._index_blacklist = _set_from_sorted_list(s.get('index_exclude_patterns', []))
global_folder_exclude = s.get('folder_exclude_patterns', [])
global_folder_include = s.get('folder_include_patterns', [])
global_file_exclude = s.get('file_exclude_patterns', [])
global_file_include = s.get('file_include_patterns', [])
proj_data = window.project_data()
for folder in proj_data['folders']:
folder_path = folder['path']
if not folder_path:
                continue
if not os.path.isabs(folder_path):
base_path, _ = os.path.split(self._db_location)
if DEBUG:
print("Found relative folder: %s. prepending %s" %
(folder_path, base_path + os.path.sep))
folder_path = os.path.join(base_path, folder_path)
folder_config = {}
folder_config['follow_symlinks'] = folder.get('follow_symlinks', True)
folder_config['file_whitelist'] = _set_from_sorted_list(global_file_include + \
folder.get('file_include_patterns',[]))
folder_config['file_blacklist'] = _set_from_sorted_list(global_file_exclude + \
folder.get('file_exclude_patterns',[]))
folder_config['folder_whitelist'] = _set_from_sorted_list(global_folder_include + \
folder.get('folder_include_patterns',[]))
folder_config['folder_blacklist'] = _set_from_sorted_list(global_folder_exclude + \
folder.get('folder_exclude_patterns',[]))
self._folder_configs[folder_path] = folder_config
        # For the config to be considered complete (i.e. usable) we need at least
        # one file extension and one folder.
self._is_complete = len(self._file_exts) > 0 and len(self._folder_configs) > 0
@property
def is_complete(self):
return self._is_complete
@property
def file_exts(self):
return self._file_exts
@property
def db_location(self):
return self._db_location
@property
def search_std_incl_folders(self):
return self._search_std_incl_folders
@property
def std_incl_folders(self):
return self._std_incl_folders
def __eq__(self, r):
res = True
if self is r:
return True
elif not isinstance(r, self.__class__):
res = NotImplemented
else:
keys_to_cmp = [
'_is_complete',
'_db_location',
'_file_exts',
'_folder_configs',
'_index_blacklist',
'_search_std_incl_folders',
'_std_incl_folders'
]
ldict = self.__dict__
rdict = r.__dict__
results = list(filterfalse(lambda k: ldict.get(k, None) == rdict.get(k, None),
keys_to_cmp))
# if results is empty, all keys evaluated to equal
res = bool(not results)
if DEBUG_INDEXERCONFIG and not res:
for key in results:
print("%s failed: '%s' != '%s'" %
(key, ldict.get(key, None), rdict.get(key, None)))
return res
def __ne__(self, r):
res = self.__eq__(r)
if res is NotImplemented:
return res
return not res
def _is_whitelisted_file(self, base_path, dirpath, file_name):
_, ext = os.path.splitext(file_name)
if not ext in self._file_exts:
return False
full_name = os.path.join(dirpath, file_name)
include_patterns = self._folder_configs[base_path]['file_whitelist']
# if the list is empty then all files are allowed
if not include_patterns:
return True
for pattern in include_patterns:
if fnmatch.fnmatch(file_name, pattern):
return True
if fnmatch.fnmatch(full_name, pattern):
return True
return False
def _is_blacklisted_file(self, base_path, dirpath, file_name):
exclude_patterns = self._folder_configs[base_path]['file_blacklist']
# if the list is empty then all files are allowed
if not exclude_patterns:
return False
full_name = os.path.join(dirpath, file_name)
for pattern in exclude_patterns:
if fnmatch.fnmatch(file_name, pattern):
return True
if fnmatch.fnmatch(full_name, pattern):
return True
for pattern in self._index_blacklist:
if fnmatch.fnmatch(file_name, pattern):
return True
if fnmatch.fnmatch(full_name, pattern):
return True
return False
def _is_whitelisted_folder(self, base_path, dirpath, folder):
include_patterns = self._folder_configs[base_path]['folder_whitelist']
# if the list is empty then all files are allowed
if not include_patterns:
return True
full_path = os.path.join(dirpath, folder)
for pattern in include_patterns:
if fnmatch.fnmatch(folder, pattern):
return True
if fnmatch.fnmatch(full_path, pattern):
return True
return False
def _is_blacklisted_folder(self, base_path, dirpath, folder):
exclude_patterns = self._folder_configs[base_path]['folder_blacklist']
# if the list is empty then all files are allowed
if not exclude_patterns:
return False
full_path = os.path.join(dirpath, folder)
for pattern in exclude_patterns:
if fnmatch.fnmatch(folder, pattern):
return True
if fnmatch.fnmatch(full_path, pattern):
return True
for pattern in self._index_blacklist:
if fnmatch.fnmatch(folder, pattern):
return True
if fnmatch.fnmatch(full_path, pattern):
return True
return False
def find_base_path(self, dirpath):
not_found = (None, None)
if not dirpath:
return not_found
for bp in self._folder_configs.keys():
if dirpath.startswith(bp):
return (bp, self._folder_configs[bp]['follow_symlinks'])
if DEBUG:
print("No base path found for '%s' in (%s)" % (dirpath, self._folder_configs.keys()))
return not_found
def base_paths(self):
return tuple((key, self._folder_configs[key]['follow_symlinks'])
for key in self._folder_configs.keys())
def file_matches(self, dirpath, file_name, st_mode=0, base_path=None):
if not base_path:
base_path, follow_symlinks = self.find_base_path(dirpath)
if not base_path:
return False
st_mode = os.stat(os.path.join(dirpath, file_name),
follow_symlinks=follow_symlinks).st_mode
if not stat.S_ISREG(st_mode):
return False
if not self._is_whitelisted_file(base_path, dirpath, file_name):
return False
if self._is_blacklisted_file(base_path, dirpath, file_name):
return False
return True
def folder_matches(self, dirpath, folder, st_mode=0, base_path=None):
if not base_path:
base_path, follow_symlinks = self.find_base_path(dirpath)
if not base_path:
return False
            st_mode = os.stat(os.path.join(dirpath, folder),
                              follow_symlinks=follow_symlinks).st_mode
if not stat.S_ISDIR(st_mode):
return False
if not self._is_whitelisted_folder(base_path, dirpath, folder):
return False
if self._is_blacklisted_folder(base_path, dirpath, folder):
return False
return True
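# Illustrative sketch (an assumed helper, not part of the class above) of the
# matching precedence used by file_matches/folder_matches: a name is indexed
# only if it is whitelisted (an empty whitelist allows everything) and not
# blacklisted. The real methods additionally match against the full path.
def _example_pattern_match(name, whitelist, blacklist):
    whitelisted = not whitelist or any(fnmatch.fnmatch(name, p) for p in whitelist)
    blacklisted = any(fnmatch.fnmatch(name, p) for p in blacklist)
    return whitelisted and not blacklisted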
# The folder where we store cscope indexes for workspaces (since they have no project
# folder associated with them.)
def _get_tmp_db_folder():
return os.path.join(sublime.cache_path(), PACKAGE_NAME, 'workspace_tmp')
def _get_proj_name(view_or_window):
proj_name = None
win = view_or_window
if hasattr(view_or_window, 'window'):
win = view_or_window.window()
# we are only interested in windows with folders open
if win and win.folders():
proj_name = win.project_file_name()
    # if the window doesn't have a proj_name, generate a dummy one
if not proj_name:
proj_name = os.path.join(_get_tmp_db_folder(), 'workspace_' + str(win.id()))
return proj_name
def _set_from_sorted_list(l):
if not l:
return set()
l.sort()
return set(l)
def _disassociate_window(proj_file, win):
indexer_data = _indexers.get(proj_file, None)
if indexer_data:
indexer_data['windows'].remove(win)
if not indexer_data['windows']:
return True
return False
def _trim_indexers():
for key, indexer_data in list(_indexers.items()):
# remove indexers that are not associated with any windows
if not indexer_data['windows']:
indexer = _indexers.pop(key)['indexer']
indexer.quit()
def _find_window_from_proj_file(proj_file):
win = None
if proj_file in _indexers:
indexer_data = _indexers[proj_file]
windows = [w for w in sublime.windows() if w.id() in indexer_data['windows']]
if windows:
win = windows[0]
return win
def _find_window_from_indexer(indexer):
win = None
for proj_file, indexer_data in _indexers.items():
if indexer is indexer_data['indexer']:
win = _find_window_from_proj_file(proj_file)
return win
# The module level API
def get_db_location(win):
if not win:
return None
proj_name = _get_proj_name(win)
if not proj_name:
return None
path, name_ext = os.path.split(proj_name)
if not os.path.exists(path):
print("%s: Path: %s does not exist. Will not attempt to index project: %s"
% (PACKAGE_NAME, path, proj_name))
return None
name, ext = os.path.splitext(name_ext)
db_location = os.path.join(path, name + DB_FOLDER_POSTFIX)
if os.path.isfile(db_location):
print("%s: Path: %s already exists but is not a folder. \
Will not attempt to index project: %s" % (PACKAGE_NAME, db_location, proj_name))
return None
return db_location
def refresh(win=None, explicit_refresh=True):
"""
    Refresh the file tree of the indexer belonging to window.
    If win is None, refresh all indexers.
"""
tmp_folder = _get_tmp_db_folder()
if os.path.isfile(tmp_folder):
print("%s: %s exists but is not a folder. Removing" % (PACKAGE_NAME, tmp_folder))
os.remove(tmp_folder)
if not os.path.exists(tmp_folder):
print("%s: Creating tmp folder: %s." % (PACKAGE_NAME, tmp_folder))
os.makedirs(tmp_folder, exist_ok=True)
windows = [win] if win else sublime.windows()
indexer_win_pair = [(_get_proj_name(win), win) for win in windows
if _get_proj_name(win)]
for proj_file, win in indexer_win_pair:
# in case the window is being reused with a new project,
# disassociate from the old project
if win.id() in _indexers_by_win and _indexers_by_win[win.id()] != proj_file:
_disassociate_window(_indexers_by_win[win.id()], win.id())
indexer_data = _indexers.setdefault(proj_file, {})
indexer = indexer_data.setdefault('indexer', Indexer())
indexer_cfg = IndexerConfig(win)
if indexer_cfg != indexer_data.get('config', None):
# Since there is a change in the config
# The indexer will do an implicit refresh
explicit_refresh = False
indexer.set_config(indexer_cfg)
indexer_data['config'] = indexer_cfg
indexer_windows = indexer_data.setdefault('windows', [])
if not win.id() in indexer_windows:
indexer_windows.append(win.id())
_indexers_by_win[win.id()] = proj_file
indexer.start()
if explicit_refresh:
indexer.refresh()
def buffer_promoted(file_path):
"""
The file located at 'file_path' has been opened and modified and should
therefore be promoted to the indexers' active list.
"""
    # Special case where the file is a project file
if file_path in _indexers:
sublime.set_timeout_async(lambda: settings_changed(file_path), 1000)
return
# Notify all indexers that the buffer should be promoted
# The indexers will ignore this call if the buffer doesn't belong to their
# project
for indexer_data in _indexers.values():
indexer_data['indexer'].promote_buffer(file_path)
if DEBUG: print("buffer_promoted: '%s'" % file_path)
def buffer_demoted(file_path):
"""
The file located at 'file_path' has been closed and should therefore
be demoted to the indexers' passive list.
"""
#ignore any project files being closed
if file_path in _indexers:
return
for indexer_data in _indexers.values():
indexer_data['indexer'].demote_buffer(file_path)
def window_state_changed():
"""
Called every time there is a significant state change in the currently
open windows and we need to take action.
"""
# look for any indexers to close
curr_windows = {win.id() for win in sublime.windows()}
old_windows = _indexers_by_win.keys()
obsolete_windows = old_windows - curr_windows
for key in obsolete_windows:
proj_file = _indexers_by_win.pop(key)
_disassociate_window(proj_file, key)
# implicitly refresh all active windows
refresh(explicit_refresh=False)
# Remove orphan indexers
_trim_indexers()
def settings_changed(proj_file=None):
"""
Called each time our settings object
(or project file) has been modified
"""
if proj_file and proj_file in _indexers:
# A specific project file was modified.
# Notify the indexer if the config differs.
indexer_data = _indexers[proj_file]
indexer = indexer_data['indexer']
config = indexer_data['config']
win = _find_window_from_proj_file(proj_file)
if not win:
return
new_config = IndexerConfig(win)
if new_config != config:
indexer.set_config(new_config)
indexer_data['config'] = new_config
else:
# implicitly refresh all active windows
refresh(explicit_refresh=False)
def quit():
"""Closes all indexers and removes them."""
_indexers_by_win.clear()
for indexer_data in _indexers.values():
indexer_data.setdefault('windows',[]).clear()
_trim_indexers()
|
|
from pytest import fixture
from mock import MagicMock
ACTION_PARENT = """
{
"class": [ "test" ],
"properties": { "Name": "Parent" },
"actions": [
{
"name": "action-one",
"method": "GET",
"href": "http://api.x.io/actions/one",
"type": "application/x-www-form-urlencoded",
"fields": [
{ "name": "Name", "type": "text" },
{ "name": "id", "type": "number", "value": "23" }
]
},
{
"name": "action-two",
"method": "GET",
"href": "http://api.x.io/actions/two",
"type": "application/json",
"fields": [
{ "name": "Name", "type": "text" },
{ "name": "id", "type": "number", "value": "23" }
]
},
{
"name": "action-three",
"method": "POST",
"href": "http://api.x.io/actions/three",
"type": "application/x-www-form-urlencoded",
"fields": [
{ "name": "Name", "type": "text" },
{ "name": "id", "type": "number", "value": "23" }
]
},
{
"name": "action-four",
"method": "POST",
"href": "http://api.x.io/actions/four",
"type": "application/json",
"fields": [
{ "name": "Name", "type": "text" },
{ "name": "id", "type": "number", "value": "23" }
]
},
{
"name": "action-five",
"method": "funky",
"href": "http://api.x.io/actions/five",
"type": "application/json",
"fields": [
{ "name": "Name", "type": "text" },
{ "name": "id", "type": "number", "value": "23" }
]
},
{
"name": "action-six",
"method": "POST",
"href": "http://api.x.io/actions/six"
},
{
"name": "action-seven",
"href": "http://api.x.io/actions/seven"
}
]
}
"""
ACTION_RESPONSE = """
{
"class": [ "action-response" ],
"properties": { "Name": "Response It." },
"links": [ { "rel": [ "self" ], "href": "http://api.x.io/supers/1" } ]
}
"""
@fixture
def parent_entity():
session = MagicMock(name='session')
response = session.get.return_value
response.content = ACTION_PARENT
from siren_client import get
parent = get('some_url', session=session)
response.content = ACTION_RESPONSE
return parent
def test_parent(parent_entity):
assert len(parent_entity.actions) == 7
def test_actions_object_singular(parent_entity):
actions_one = parent_entity.actions
actions_two = parent_entity.actions
assert id(actions_one) == id(actions_two)
def test_action_object_singular(parent_entity):
action_one = parent_entity.actions['action-one']
action_two = parent_entity.actions['action-one']
assert id(action_one) == id(action_two)
def test_action_one(parent_entity):
session = parent_entity.client.session
response = parent_entity.actions['action-one']()
assert response['Name'] == 'Response It.'
session.get.assert_called_with("http://api.x.io/actions/one",
data=None,
params={'id': '23'})
def test_action_two(parent_entity):
session = parent_entity.client.session
response = parent_entity.actions['action-two'](Name='SomeName')
assert response['Name'] == 'Response It.'
session.get.assert_called_with("http://api.x.io/actions/two",
data=None,
params={'Name': 'SomeName',
'id': '23'})
def test_action_three(parent_entity):
session = parent_entity.client.session
session.post.return_value.content = ACTION_RESPONSE
response = parent_entity.actions['action-three'](Name='SomeName')
assert response['Name'] == 'Response It.'
session.post.assert_called_with(
"http://api.x.io/actions/three",
data={'id': '23', 'Name': 'SomeName'},
headers={'content-type': 'application/x-www-form-urlencoded'}
)
def test_action_four(parent_entity):
session = parent_entity.client.session
session.post.return_value.content = ACTION_RESPONSE
response = parent_entity.actions['action-four'](Name='SomeName')
assert response['Name'] == 'Response It.'
session.post.assert_called_with(
"http://api.x.io/actions/four",
data='{"id": "23", "Name": "SomeName"}',
headers={'content-type': 'application/json'}
)
def test_action_five(parent_entity):
session = parent_entity.client.session
session.funky.return_value.content = ACTION_RESPONSE
response = parent_entity.actions['action-five'](Name='SomeName')
assert response['Name'] == 'Response It.'
session.funky.assert_called_with(
"http://api.x.io/actions/five",
data='{"id": "23", "Name": "SomeName"}',
headers={'content-type': 'application/json'}
)
def test_action_six(parent_entity):
session = parent_entity.client.session
session.post.return_value.content = ACTION_RESPONSE
response = parent_entity.actions['action-six'](Name='SomeName')
assert response['Name'] == 'Response It.'
session.post.assert_called_with(
"http://api.x.io/actions/six",
data={'Name': 'SomeName'},
headers={'content-type': 'application/x-www-form-urlencoded'}
)
def test_action_seven(parent_entity):
session = parent_entity.client.session
response = parent_entity.actions['action-seven'](Name='SomeName')
assert response['Name'] == 'Response It.'
session.get.assert_called_with("http://api.x.io/actions/seven",
params={'Name': 'SomeName'},
data=None)
def test_populate_action_one(parent_entity):
session = parent_entity.client.session
parent_entity.actions['action-one'].populate(parent_entity)
response = parent_entity.actions['action-one'](id=4)
assert response['Name'] == 'Response It.'
session.get.assert_called_with("http://api.x.io/actions/one",
data=None,
params={'Name': 'Parent', 'id': 4})
def test_populate_action_three(parent_entity):
session = parent_entity.client.session
session.post.return_value.content = ACTION_RESPONSE
parent_entity.actions['action-three'].populate(parent_entity)
response = parent_entity.actions['action-three'](desc='A Desc')
assert response['Name'] == 'Response It.'
session.post.assert_called_with(
"http://api.x.io/actions/three",
data={'id': '23', 'desc': 'A Desc', 'Name': 'Parent'},
headers={'content-type': 'application/x-www-form-urlencoded'}
)
def test_populate_action_six(parent_entity):
session = parent_entity.client.session
session.post.return_value.content = ACTION_RESPONSE
parent_entity.actions['action-six'].populate(parent_entity)
response = parent_entity.actions['action-six'](desc='A Desc')
assert response['Name'] == 'Response It.'
session.post.assert_called_with(
"http://api.x.io/actions/six",
data={'desc': 'A Desc'},
headers={'content-type': 'application/x-www-form-urlencoded'}
)
def test_populate_action_seven(parent_entity):
session = parent_entity.client.session
parent_entity.actions['action-seven'].populate(parent_entity)
response = parent_entity.actions['action-seven'](desc='Some Desc')
assert response['Name'] == 'Response It.'
session.get.assert_called_with(
"http://api.x.io/actions/seven",
params={'desc': 'Some Desc'},
data=None
)
|
|
"""
#;+
#; NAME:
#; igm.tau_eff
#; Version 1.0
#;
#; PURPOSE:
#; Module for tau effective
#; 07-Nov-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function
import os, imp, pickle
import numpy as np
from scipy import interpolate
from xastropy.igm import igm_utils as xigmu
from astropy.io import fits
from astropy.cosmology import FlatLambdaCDM
from astropy import units as u
from xastropy.xutils import xdebug as xdb
# Path for xastropy
xa_path = imp.find_module('xastropy')[1]
# def ew_teff_lyman -- Calculate tau_effective for the HI Lyman series
# def mk_ew_lyman_spline -- Generates a Pickle file for EW splines
# def teff_obs(z)
# Calculate tau_effective for the Lyman series using the EW
# approximation (e.g. Zuo 93)
def map_etl(dict_inp):
''' Simple routine to enable parallel processing
'''
teff = ew_teff_lyman(dict_inp['ilambda'],
dict_inp['zem'], dict_inp['fN_model'])
# Return
return teff
def ew_teff_lyman(ilambda, zem, fN_model, NHI_MIN=11.5, NHI_MAX=22.0, N_eval=5000,
EW_spline=None, bval=24., fNz=False, cosmo=None, debug=False,
cumul=None, verbose=False):
""" tau effective (follows ew_teff_lyman.pro from XIDL)
teff = ew_teff_lyman(3400., 2.4)
Parameters:
-------------
ilambda: float
Observed wavelength
zem: float
Emission redshift of the source [sets which Lyman lines are included]
    bval: float
      -- Characteristic Doppler parameter for the Lya forest
-- [Options: 24, 35 km/s]
NHI_MIN: float
-- Minimum log HI column for integration [default = 11.5]
NHI_MAX: float
-- Maximum log HI column for integration [default = 22.0]
fNz: Boolean (False)
-- Inputs f(N,z) instead of f(N,X)
cosmo: astropy.cosmology (None)
-- Cosmological model to adopt (as needed)
cumul: List of cumulative sums
-- Recorded only if cumul is not None
Returns:
teff:
Total effective opacity of all lines contributing
ToDo:
1. Parallelize the Lyman loop
JXP 07 Nov 2014
"""
# Lambda
if not isinstance(ilambda,float):
raise ValueError('igm.tau_eff: ilambda must be a float for now')
Lambda = ilambda
if not isinstance(Lambda,u.quantity.Quantity):
Lambda = Lambda * u.AA # Ang
# Read in EW spline (if needed)
    if EW_spline is None:
if int(bval) == 24:
EW_FIL = xa_path+'/igm/EW_SPLINE_b24.p'
elif int(bval) == 35:
EW_FIL = os.environ.get('XIDL_DIR')+'/IGM/EW_SPLINE_b35.fits'
else:
raise ValueError('igm.tau_eff: Not ready for this bvalue %g' % bval)
EW_spline = pickle.load(open(EW_FIL,"rb"))
# Lines
wrest = tau_eff_llist()
# Find the lines
gd_Lyman = wrest[(Lambda/(1+zem)) < wrest]
nlyman = len(gd_Lyman)
if nlyman == 0:
if verbose:
print('igm.tau_eff: No Lyman lines covered at this wavelength')
return 0
# N_HI grid
lgNval = NHI_MIN + (NHI_MAX-NHI_MIN)*np.arange(N_eval)/(N_eval-1) # Base 10
dlgN = lgNval[1]-lgNval[0]
Nval = 10.**lgNval
teff_lyman = np.zeros(nlyman)
# For cumulative
if not cumul is None:
cumul.append(lgNval)
# Loop on the lines
for qq,line in enumerate(gd_Lyman): # Would be great to do this in parallel...
# (Can pack together and should)
# Redshift
zeval = ((Lambda / line) - 1).value
if zeval < 0.:
teff_lyman[qq] = 0.
continue
# Cosmology
if fNz is False:
            if cosmo is None:
cosmo = FlatLambdaCDM(H0=70, Om0=0.3) # Vanilla
#dxdz = (np.fabs(xigmu.cosm_xz(zeval-0.1, cosmo=cosmo)-
# xigmu.cosm_xz(zeval+0.1,cosmo=cosmo)) / 0.2 )
#xdb.set_trace()
dxdz = xigmu.cosm_xz(zeval,cosmo=cosmo,flg=1)
else: dxdz = 1. # Code is using f(N,z)
#print('dxdz = %g' % dxdz)
# Get EW values (could pack these all together)
idx = np.where(EW_spline['wrest'] == line)[0]
if len(idx) != 1:
raise ValueError('tau_eff: Line %g not included or over included?!' % line)
restEW = interpolate.splev(lgNval, EW_spline['tck'][idx], der=0)
# dz
dz = ((restEW*u.AA) * (1+zeval) / line).value
# Evaluate f(N,X) at zeval
log_fnX = fN_model.eval(lgNval,zeval).flatten()
#xdb.set_trace()
# Sum
intgrnd = 10.**(log_fnX) * dxdz * dz * Nval
teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.)
if not cumul is None:
cumul.append( np.cumsum(intgrnd) * dlgN * np.log(10.) )
#xdb.set_trace()
# Debug
if debug==True:
xdb.xplot(lgNval, np.log10(10.**(log_fnX) * dxdz * dz * Nval))
#x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
#printcol, lgnval, log_fnx, dz, alog10(10.d^(log_fnX) * dxdz * dz * Nval)
#writecol, 'debug_file'+strtrim(qq,2)+'.dat', lgNval, restEW, log_fnX
xdb.set_trace()
#xdb.set_trace()
return np.sum(teff_lyman)
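# In formula form, the loop above evaluates the EW approximation
#   tau_eff(lambda_obs) = sum_lines  int dN  f(N,X) (dX/dz) dz(N),
# with dz(N) = (1 + z_line) * W_r(N) / lambda_rest and dN = N ln(10) dlogN,
# discretized on the logN grid lgNval with spacing dlgN.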
# ###
# Generate a pickle file of a Spline of EW vs NHI for the Lyman series
def mk_ew_lyman_spline(bval,ew_fil=None):
""" Generate a pickle file of a Spline of EW vs NHI for the Lyman series
Parameters:
bval: float
Doppler parameter (km/s)
ew_fil: string ('EW_SPLINE_b##.p')
Name of output pickle file
"""
from astropy import constants as const
from xastropy.spec import abs_line as xsab
from xastropy.spec import voigt as xsv
# Outfil
    if ew_fil is None:
ew_fil = 'EW_SPLINE_b'+str(int(bval))+'.p'
# Units
if not isinstance(bval,u.quantity.Quantity):
bval = bval * u.km/u.s # km/s
# NHI
nspl = 100
log_NHI = 11.0 + 11*np.arange(nspl)/(nspl-1.)
# Lines
wrest= tau_eff_llist()
# Output
outp = {'wrest': wrest, 'tck': []}
# Setup
nvel = 60001
velo = (-30000. + np.arange(nvel,dtype='float64'))*u.km/u.s # km/s
dvel = 1. * u.km/u.s # km/s
uval = velo / bval
# Loop
for cnt,line in enumerate(wrest):
# Get atomic data
abl_data = xsab.abs_line_data(line.value)
# Wave array
dwv = dvel.to(u.cm/u.s) * line / const.c.cgs # Ang
# Voigt
vd = (bval/line).to(u.Hz) # Frequency
a = abl_data['gamma'] / (12.56637 * vd.value)
vgt = xsv.voigtking(uval,a)
# tau
tau = 0.014971475*abl_data['fval']*vgt/vd # Normalized to N_HI = 1 cm^-2
# Flux
tau_array = np.outer(tau, 10.**log_NHI)
fx = np.exp(-1.*tau_array)
# EW
EW = np.sum(1.-fx, 0) * dwv
#EW_SPLINE[qq].EW = EW
# Spline
#EW_SPLINE[qq].splint = spl_init(NHI, EW, /double)
tck = interpolate.splrep(log_NHI, EW)#, s=0)
# Check?
chk=False
#if line == (1215.6701*u.AA):
# xdb.set_trace()
# chk=True
if chk:
from matplotlib import pyplot as plt
plt.clf()
plt.plot(log_NHI,EW,'o')
# Spline
xnew = np.linspace(np.amin(log_NHI),np.amax(log_NHI), nspl*10)
ynew = interpolate.splev(xnew, tck, der=0)
plt.plot(xnew, ynew, '-')
plt.show()
# Output
print('line = %g' % line.value)
outp['tck'].append(tck)
# Write
print('Writing %s' % ew_fil)
pickle.dump( outp, open( ew_fil, "wb" ) )
# Line list for tau_eff HI Lyman calculations
def tau_eff_llist():
# Imports
# Assumed Line list
wrest = (np.array([1215.6701, 1025.7223, 972.53680, 949.74310, 937.80350,
930.74830, 926.22570, 923.15040, 920.96310, 919.35140,
918.12940, 917.18060, 916.42900, 915.82400, 915.32900,
914.91900, 914.57600, 914.28600, 914.03900, 913.82600,
913.64100, 913.48000, 913.33900, 913.21500, 913.10400,
913.00600, 912.91800, 912.83900, 912.76800, 912.70300,
912.64500],dtype='float64' )) * u.AA
wrest.sort()
return wrest
#
def teff_obs(z):
'''
Report an 'observed' teff value from one of these studies
0 < z < 1.6: Kirkman+07
'''
# Low-z
if z<1.6:
# D_A from Kirkman+07
# No LLS, no metals [masked]
# Unclear in the paper, but I think the range is log NHI = 12-16
DA = 0.016 * (1+z)**1.01
teff = -1. * np.log(1.-DA)
else:
raise ValueError('teff_obs: Not ready for z={:g}'.format(z))
# Return
return teff
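# Worked example (approximate): teff_obs(1.0) gives D_A = 0.016 * 2**1.01 ~ 0.032,
# so teff = -ln(1 - 0.032) ~ 0.033.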
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
'''
# Make EW spline file
mk_ew_lyman_spline(24.)
'''
from xastropy.igm.fN import model as xifm
import multiprocessing
#xdb.set_trace()
# read f(N)
fN_model = xifm.default_model()
print(fN_model)
# tau_eff
#tst_wv = tau_eff_llist()
tst_wv = np.arange(915.,1255,1.)
#lamb = 1215.6701*(1+2.4)
adict = []
for wrest in tst_wv:
tdict = dict(ilambda=wrest*(1+2.4), zem=2.5, fN_model=fN_model)
adict.append(tdict)
    pool = multiprocessing.Pool(4)  # initialize a pool of 4 worker processes
ateff = pool.map(map_etl, adict)
# Plot
xdb.xplot(tst_wv,np.exp(-np.array(ateff)))
#xdb.set_trace()
#teff = ew_teff_lyman(lamb, 2.5, fN_model, NHI_MIN=12., NHI_MAX=17.)
#print('teff at z=2.4 :: %g' % teff)
#teff = ew_teff_lyman(3400., 2.4, fN_model)
#print('teff at 3400A = %g' % teff)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import urlparse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from keystoneclient import service_catalog
from keystoneclient.v2_0 import client as keystone_client
from keystoneclient.v2_0 import tokens
from horizon.api import base
from horizon import exceptions
LOG = logging.getLogger(__name__)
DEFAULT_ROLE = None
class Service(base.APIDictWrapper):
""" Wrapper for a dict based on the service data from keystone. """
_attrs = ['id', 'type', 'name']
def __init__(self, service, *args, **kwargs):
super(Service, self).__init__(service, *args, **kwargs)
self.url = service['endpoints'][0]['internalURL']
self.host = urlparse.urlparse(self.url).hostname
self.region = service['endpoints'][0]['region']
self.disabled = None
def __unicode__(self):
if(self.type == "identity"):
return _("%(type)s (%(backend)s backend)") \
% {"type": self.type,
"backend": keystone_backend_name()}
else:
return self.type
def __repr__(self):
return "<Service: %s>" % unicode(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
if getattr(request.user, "service_catalog", None):
return base.url_for(request,
service_type='identity',
endpoint_type=endpoint_type)
return request.session.get('region_endpoint',
getattr(settings, 'OPENSTACK_KEYSTONE_URL'))
def keystoneclient(request, username=None, password=None, tenant_id=None,
token_id=None, endpoint=None, endpoint_type=None,
admin=False):
"""Returns a client connected to the Keystone backend.
Several forms of authentication are supported:
* Username + password -> Unscoped authentication
* Username + password + tenant id -> Scoped authentication
* Unscoped token -> Unscoped authentication
* Unscoped token + tenant id -> Scoped authentication
* Scoped token -> Scoped authentication
Available services and data from the backend will vary depending on
whether the authentication was scoped or unscoped.
    Lazy authentication is used if an ``endpoint`` parameter is provided.
Calls requiring the admin endpoint should have ``admin=True`` passed in
as a keyword argument.
The client is cached so that subsequent API calls during the same
request/response cycle don't have to be re-authenticated.
"""
user = request.user
if admin:
if not user.is_admin():
raise exceptions.NotAuthorized
endpoint_type = 'adminURL'
else:
endpoint_type = endpoint_type or getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'internalURL')
# Take care of client connection caching/fetching a new client.
# Admin vs. non-admin clients are cached separately for token matching.
cache_attr = "_keystone_admin" if admin else "_keystone"
if hasattr(request, cache_attr) and (not token_id
or getattr(request, cache_attr).auth_token == token_id):
LOG.debug("Using cached client for token: %s" % user.token)
conn = getattr(request, cache_attr)
else:
endpoint_lookup = _get_endpoint_url(request, endpoint_type)
auth_url = endpoint or endpoint_lookup
LOG.debug("Creating a new keystoneclient connection to %s." % auth_url)
conn = keystone_client.Client(username=username or user.username,
password=password,
tenant_id=tenant_id or user.tenant_id,
token=token_id or user.token,
auth_url=auth_url,
endpoint=endpoint)
setattr(request, cache_attr, conn)
# Fetch the correct endpoint if we've re-scoped the token.
catalog = getattr(conn, 'service_catalog', None)
if catalog and "serviceCatalog" in catalog.catalog.keys():
catalog = catalog.catalog['serviceCatalog']
endpoint = _get_endpoint_url(request, endpoint_type, catalog)
conn.management_url = endpoint
return conn
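# Usage sketch (illustrative): callers in this module route through this helper,
# for example:
#   keystoneclient(request).tenants.list()             # user-scoped client
#   keystoneclient(request, admin=True).users.list()   # requires the admin role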
def tenant_name(request, tenant_id):
return keystoneclient(request).tenants.get(tenant_id).name
def tenant_create(request, tenant_name, description, enabled):
return keystoneclient(request, admin=True).tenants.create(tenant_name,
description,
enabled)
def tenant_get(request, tenant_id, admin=False):
return keystoneclient(request, admin=admin).tenants.get(tenant_id)
def tenant_delete(request, tenant_id):
keystoneclient(request, admin=True).tenants.delete(tenant_id)
def tenant_list(request, admin=False):
return keystoneclient(request, admin=admin).tenants.list()
def tenant_update(request, tenant_id, tenant_name, description, enabled):
return keystoneclient(request, admin=True).tenants.update(tenant_id,
tenant_name,
description,
enabled)
def tenant_list_for_token(request, token, endpoint_type=None):
endpoint_type = endpoint_type or getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'internalURL')
c = keystoneclient(request,
token_id=token,
endpoint=_get_endpoint_url(request, endpoint_type),
endpoint_type=endpoint_type)
return c.tenants.list()
def token_create(request, tenant, username, password):
'''
Creates a token using the username and password provided. If tenant
is provided it will retrieve a scoped token and the service catalog for
    the given tenant. Otherwise it will return an unscoped token without
a service catalog.
'''
c = keystoneclient(request,
username=username,
password=password,
tenant_id=tenant,
endpoint=_get_endpoint_url(request, 'internalURL'))
token = c.tokens.authenticate(username=username,
password=password,
tenant_id=tenant)
return token
def token_create_scoped(request, tenant, token):
'''
Creates a scoped token using the tenant id and unscoped token; retrieves
the service catalog for the given tenant.
'''
if hasattr(request, '_keystone'):
del request._keystone
c = keystoneclient(request,
tenant_id=tenant,
token_id=token,
endpoint=_get_endpoint_url(request, 'internalURL'))
raw_token = c.tokens.authenticate(tenant_id=tenant,
token=token,
return_raw=True)
c.service_catalog = service_catalog.ServiceCatalog(raw_token)
if request.user.is_admin():
c.management_url = c.service_catalog.url_for(service_type='identity',
endpoint_type='adminURL')
else:
endpoint_type = getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'internalURL')
c.management_url = c.service_catalog.url_for(
service_type='identity', endpoint_type=endpoint_type)
scoped_token = tokens.Token(tokens.TokenManager, raw_token)
return scoped_token
def user_list(request, tenant_id=None):
return keystoneclient(request, admin=True).users.list(tenant_id=tenant_id)
def user_create(request, user_id, email, password, tenant_id, enabled):
return keystoneclient(request, admin=True).users.create(user_id,
password,
email,
tenant_id,
enabled)
def user_delete(request, user_id):
keystoneclient(request, admin=True).users.delete(user_id)
def user_get(request, user_id, admin=True):
return keystoneclient(request, admin=admin).users.get(user_id)
def user_update(request, user, **data):
return keystoneclient(request, admin=True).users.update(user, **data)
def user_update_enabled(request, user_id, enabled):
return keystoneclient(request, admin=True).users.update_enabled(user_id,
enabled)
def user_update_password(request, user_id, password, admin=True):
return keystoneclient(request, admin=admin).users.update_password(user_id,
password)
def user_update_tenant(request, user_id, tenant_id, admin=True):
return keystoneclient(request, admin=admin).users.update_tenant(user_id,
tenant_id)
def role_list(request):
""" Returns a global list of available roles. """
return keystoneclient(request, admin=True).roles.list()
def roles_for_user(request, user, project):
return keystoneclient(request, admin=True).roles.roles_for_user(user,
project)
def add_tenant_user_role(request, tenant_id, user_id, role_id):
""" Adds a role for a user on a tenant. """
return keystoneclient(request, admin=True).roles.add_user_role(user_id,
role_id,
tenant_id)
def remove_tenant_user(request, tenant_id, user_id):
""" Removes all roles from a user on a tenant, removing them from it. """
client = keystoneclient(request, admin=True)
roles = client.roles.roles_for_user(user_id, tenant_id)
for role in roles:
client.roles.remove_user_role(user_id, role.id, tenant_id)
def get_default_role(request):
"""
Gets the default role object from Keystone and saves it as a global
since this is configured in settings and should not change from request
to request. Supports lookup by name or id.
"""
global DEFAULT_ROLE
default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
if default and DEFAULT_ROLE is None:
try:
roles = keystoneclient(request, admin=True).roles.list()
except:
roles = []
exceptions.handle(request)
for role in roles:
if role.id == default or role.name == default:
DEFAULT_ROLE = role
break
return DEFAULT_ROLE
def list_ec2_credentials(request, user_id):
return keystoneclient(request).ec2.list(user_id)
def create_ec2_credentials(request, user_id, tenant_id):
return keystoneclient(request).ec2.create(user_id, tenant_id)
def get_user_ec2_credentials(request, user_id, access_token):
return keystoneclient(request).ec2.get(user_id, access_token)
def keystone_can_edit_user():
if hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
return settings.OPENSTACK_KEYSTONE_BACKEND['can_edit_user']
else:
return False
def keystone_backend_name():
if hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
return settings.OPENSTACK_KEYSTONE_BACKEND['name']
else:
return 'unknown'
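# --- hedged example (illustrative only, not part of this module) -----------
# Shape of the Django setting consumed by keystone_can_edit_user() and
# keystone_backend_name() above; the key names are the ones those helpers
# read, the values are made up for illustration.
EXAMPLE_OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
}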
|
|
from __future__ import print_function
import fnmatch
import os
import re
import sys
import py
import pytest
import execnet
import xdist.remote
def parse_spec_config(config):
xspeclist = []
for xspec in config.getvalue("tx"):
i = xspec.find("*")
try:
num = int(xspec[:i])
except ValueError:
xspeclist.append(xspec)
else:
xspeclist.extend([xspec[i + 1 :]] * num)
if not xspeclist:
raise pytest.UsageError(
"MISSING test execution (tx) nodes: please specify --tx"
)
return xspeclist
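# Hedged illustration (not part of xdist): how the "--tx" values above expand,
# using a throwaway stand-in for the pytest config object.
def _demo_parse_spec_config():
    class _FakeConfig(object):
        def getvalue(self, name):
            assert name == "tx"
            return ["2*popen//python=python3", "ssh=testhost"]
    assert parse_spec_config(_FakeConfig()) == [
        "popen//python=python3",
        "popen//python=python3",
        "ssh=testhost",
    ]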
class NodeManager(object):
EXIT_TIMEOUT = 10
DEFAULT_IGNORES = [".*", "*.pyc", "*.pyo", "*~"]
def __init__(self, config, specs=None, defaultchdir="pyexecnetcache"):
self.config = config
self.trace = self.config.trace.get("nodemanager")
self.group = execnet.Group()
if specs is None:
specs = self._getxspecs()
self.specs = []
for spec in specs:
if not isinstance(spec, execnet.XSpec):
spec = execnet.XSpec(spec)
if not spec.chdir and not spec.popen:
spec.chdir = defaultchdir
self.group.allocate_id(spec)
self.specs.append(spec)
self.roots = self._getrsyncdirs()
self.rsyncoptions = self._getrsyncoptions()
self._rsynced_specs = set()
def rsync_roots(self, gateway):
"""Rsync the set of roots to the node's gateway cwd."""
if self.roots:
for root in self.roots:
self.rsync(gateway, root, **self.rsyncoptions)
def setup_nodes(self, putevent):
self.config.hook.pytest_xdist_setupnodes(config=self.config, specs=self.specs)
self.trace("setting up nodes")
nodes = []
for spec in self.specs:
nodes.append(self.setup_node(spec, putevent))
return nodes
def setup_node(self, spec, putevent):
gw = self.group.makegateway(spec)
self.config.hook.pytest_xdist_newgateway(gateway=gw)
self.rsync_roots(gw)
node = WorkerController(self, gw, self.config, putevent)
gw.node = node # keep the node alive
node.setup()
self.trace("started node %r" % node)
return node
def teardown_nodes(self):
self.group.terminate(self.EXIT_TIMEOUT)
def _getxspecs(self):
return [execnet.XSpec(x) for x in parse_spec_config(self.config)]
def _getrsyncdirs(self):
for spec in self.specs:
if not spec.popen or spec.chdir:
break
else:
return []
import pytest
import _pytest
pytestpath = pytest.__file__.rstrip("co")
pytestdir = py.path.local(_pytest.__file__).dirpath()
config = self.config
candidates = [py._pydir, pytestpath, pytestdir]
candidates += config.option.rsyncdir
rsyncroots = config.getini("rsyncdirs")
if rsyncroots:
candidates.extend(rsyncroots)
roots = []
for root in candidates:
root = py.path.local(root).realpath()
if not root.check():
raise pytest.UsageError("rsyncdir doesn't exist: %r" % (root,))
if root not in roots:
roots.append(root)
return roots
def _getrsyncoptions(self):
"""Get options to be passed for rsync."""
ignores = list(self.DEFAULT_IGNORES)
ignores += self.config.option.rsyncignore
ignores += self.config.getini("rsyncignore")
return {
"ignores": ignores,
"verbose": getattr(self.config.option, "verbose", False),
}
def rsync(self, gateway, source, notify=None, verbose=False, ignores=None):
"""Perform rsync to remote hosts for node."""
# XXX This changes the calling behaviour of
# pytest_xdist_rsyncstart and pytest_xdist_rsyncfinish to
# be called once per rsync target.
rsync = HostRSync(source, verbose=verbose, ignores=ignores)
spec = gateway.spec
if spec.popen and not spec.chdir:
# XXX This assumes that sources are python-packages
# and that adding the basedir does not hurt.
gateway.remote_exec(
"""
import sys ; sys.path.insert(0, %r)
"""
% os.path.dirname(str(source))
).waitclose()
return
if (spec, source) in self._rsynced_specs:
return
def finished():
if notify:
notify("rsyncrootready", spec, source)
rsync.add_target_host(gateway, finished=finished)
self._rsynced_specs.add((spec, source))
self.config.hook.pytest_xdist_rsyncstart(source=source, gateways=[gateway])
rsync.send()
self.config.hook.pytest_xdist_rsyncfinish(source=source, gateways=[gateway])
class HostRSync(execnet.RSync):
""" RSyncer that filters out common files
"""
def __init__(self, sourcedir, *args, **kwargs):
self._synced = {}
self._ignores = []
ignores = kwargs.pop("ignores", None) or []
for x in ignores:
x = getattr(x, "strpath", x)
self._ignores.append(re.compile(fnmatch.translate(x)))
super(HostRSync, self).__init__(sourcedir=sourcedir, **kwargs)
def filter(self, path):
path = py.path.local(path)
for cre in self._ignores:
if cre.match(path.basename) or cre.match(path.strpath):
return False
else:
return True
def add_target_host(self, gateway, finished=None):
remotepath = os.path.basename(self._sourcedir)
super(HostRSync, self).add_target(
gateway, remotepath, finishedcallback=finished, delete=True
)
def _report_send_file(self, gateway, modified_rel_path):
if self._verbose:
path = os.path.basename(self._sourcedir) + "/" + modified_rel_path
remotepath = gateway.spec.chdir
print("%s:%s <= %s" % (gateway.spec, remotepath, path))
def make_reltoroot(roots, args):
# XXX introduce/use public API for splitting pytest args
splitcode = "::"
result = []
for arg in args:
parts = arg.split(splitcode)
fspath = py.path.local(parts[0])
if not fspath.exists():
continue
for root in roots:
x = fspath.relto(root)
if x or fspath == root:
parts[0] = root.basename + "/" + x
break
else:
raise ValueError("arg %s not relative to an rsync root" % (arg,))
result.append(splitcode.join(parts))
return result
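# Hedged sketch (not part of xdist): make_reltoroot() rewrites pytest test
# addresses so their path part becomes relative to an rsync root; the
# temporary directory here exists purely for illustration.
def _demo_make_reltoroot():
    import tempfile
    root = py.path.local(tempfile.mkdtemp())
    testfile = root.join("tests", "test_demo.py").ensure()
    (rewritten,) = make_reltoroot([root], ["%s::TestDemo" % testfile])
    # rewritten == root.basename + "/tests/test_demo.py::TestDemo" on POSIX
    return rewritten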
class WorkerController(object):
ENDMARK = -1
class RemoteHook:
@pytest.mark.trylast
def pytest_xdist_getremotemodule(self):
return xdist.remote
def __init__(self, nodemanager, gateway, config, putevent):
config.pluginmanager.register(self.RemoteHook())
self.nodemanager = nodemanager
self.putevent = putevent
self.gateway = gateway
self.config = config
self.workerinput = {
"workerid": gateway.id,
"workercount": len(nodemanager.specs),
"slaveid": gateway.id,
"slavecount": len(nodemanager.specs),
"mainargv": sys.argv,
}
# TODO: deprecated name, backward compatibility only. Remove it in future
self.slaveinput = self.workerinput
self._down = False
self._shutdown_sent = False
self.log = py.log.Producer("workerctl-%s" % gateway.id)
if not self.config.option.debug:
py.log.setconsumer(self.log._keywords, None)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.gateway.id)
@property
def shutting_down(self):
return self._down or self._shutdown_sent
def setup(self):
self.log("setting up worker session")
spec = self.gateway.spec
if hasattr(self.config, "invocation_params"):
args = [str(x) for x in self.config.invocation_params.args or ()]
option_dict = {}
else:
args = self.config.args
option_dict = vars(self.config.option)
if not spec.popen or spec.chdir:
args = make_reltoroot(self.nodemanager.roots, args)
if spec.popen:
name = "popen-%s" % self.gateway.id
if hasattr(self.config, "_tmpdirhandler"):
basetemp = self.config._tmpdirhandler.getbasetemp()
option_dict["basetemp"] = str(basetemp.join(name))
self.config.hook.pytest_configure_node(node=self)
remote_module = self.config.hook.pytest_xdist_getremotemodule()
self.channel = self.gateway.remote_exec(remote_module)
# change sys.path only for remote workers
change_sys_path = not self.gateway.spec.popen
self.channel.send((self.workerinput, args, option_dict, change_sys_path))
if self.putevent:
self.channel.setcallback(self.process_from_remote, endmarker=self.ENDMARK)
def ensure_teardown(self):
if hasattr(self, "channel"):
if not self.channel.isclosed():
self.log("closing", self.channel)
self.channel.close()
# del self.channel
if hasattr(self, "gateway"):
self.log("exiting", self.gateway)
self.gateway.exit()
# del self.gateway
def send_runtest_some(self, indices):
self.sendcommand("runtests", indices=indices)
def send_runtest_all(self):
self.sendcommand("runtests_all")
def shutdown(self):
if not self._down:
try:
self.sendcommand("shutdown")
except (IOError, OSError):
pass
self._shutdown_sent = True
def sendcommand(self, name, **kwargs):
""" send a named parametrized command to the other side. """
self.log("sending command %s(**%s)" % (name, kwargs))
self.channel.send((name, kwargs))
def notify_inproc(self, eventname, **kwargs):
self.log("queuing %s(**%s)" % (eventname, kwargs))
self.putevent((eventname, kwargs))
def process_from_remote(self, eventcall): # noqa too complex
""" this gets called for each object we receive from
the other side and if the channel closes.
Note that channel callbacks run in the receiver
thread of execnet gateways - we need to
avoid raising exceptions or doing heavy work.
"""
try:
if eventcall == self.ENDMARK:
err = self.channel._getremoteerror()
if not self._down:
if not err or isinstance(err, EOFError):
err = "Not properly terminated" # lost connection?
self.notify_inproc("errordown", node=self, error=err)
self._down = True
return
eventname, kwargs = eventcall
if eventname in ("collectionstart",):
self.log("ignoring %s(%s)" % (eventname, kwargs))
elif eventname == "workerready":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "workerfinished":
self._down = True
self.workeroutput = self.slaveoutput = kwargs["workeroutput"]
self.notify_inproc("workerfinished", node=self)
elif eventname in ("logstart", "logfinish"):
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname in ("testreport", "collectreport", "teardownreport"):
item_index = kwargs.pop("item_index", None)
rep = self.config.hook.pytest_report_from_serializable(
config=self.config, data=kwargs["data"]
)
if item_index is not None:
rep.item_index = item_index
self.notify_inproc(eventname, node=self, rep=rep)
elif eventname == "collectionfinish":
self.notify_inproc(eventname, node=self, ids=kwargs["ids"])
elif eventname == "runtest_protocol_complete":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "logwarning":
self.notify_inproc(
eventname,
message=kwargs["message"],
code=kwargs["code"],
nodeid=kwargs["nodeid"],
fslocation=kwargs["nodeid"],
)
elif eventname == "warning_captured":
warning_message = unserialize_warning_message(
kwargs["warning_message_data"]
)
self.notify_inproc(
eventname,
warning_message=warning_message,
when=kwargs["when"],
item=kwargs["item"],
)
else:
raise ValueError("unknown event: %s" % (eventname,))
except KeyboardInterrupt:
# should not land in receiver-thread
raise
except: # noqa
from _pytest._code import ExceptionInfo
# ExceptionInfo API changed in pytest 4.1
if hasattr(ExceptionInfo, "from_current"):
excinfo = ExceptionInfo.from_current()
else:
excinfo = ExceptionInfo()
print("!" * 20, excinfo)
self.config.notify_exception(excinfo)
self.shutdown()
self.notify_inproc("errordown", node=self, error=excinfo)
def unserialize_warning_message(data):
import warnings
import importlib
if data["message_module"]:
mod = importlib.import_module(data["message_module"])
cls = getattr(mod, data["message_class_name"])
message = None
if data["message_args"] is not None:
try:
message = cls(*data["message_args"])
except TypeError:
pass
if message is None:
# could not recreate the original warning instance;
# create a generic Warning instance with the original
# message at least
message_text = "{mod}.{cls}: {msg}".format(
mod=data["message_module"],
cls=data["message_class_name"],
msg=data["message_str"],
)
message = Warning(message_text)
else:
message = data["message_str"]
if data["category_module"]:
mod = importlib.import_module(data["category_module"])
category = getattr(mod, data["category_class_name"])
else:
category = None
kwargs = {"message": message, "category": category}
# access private _WARNING_DETAILS because the attributes vary between Python versions
for attr_name in warnings.WarningMessage._WARNING_DETAILS:
if attr_name in ("message", "category"):
continue
kwargs[attr_name] = data[attr_name]
return warnings.WarningMessage(**kwargs)
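# Hedged sketch (not part of xdist): the payload shape consumed by
# unserialize_warning_message() above, assuming Python 3.6+ where
# warnings.WarningMessage._WARNING_DETAILS includes "source".
def _demo_unserialize_warning_message():
    data = {
        "message_module": None,           # force the plain-string fallback
        "message_class_name": "UserWarning",
        "message_args": None,
        "message_str": "something happened",
        "category_module": "builtins",
        "category_class_name": "UserWarning",
        "filename": "demo.py",
        "lineno": 1,
        "file": None,
        "line": None,
        "source": None,
    }
    wm = unserialize_warning_message(data)
    assert wm.category is UserWarning
    assert wm.message == "something happened"
    return wm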
|
|
import copy
import unittest
import os
import shutil
import sys
import traceback
import numpy as np
from numpy.linalg import LinAlgError
import sklearn.datasets
from autosklearn.data.util import convert_to_bin
from autosklearn.data.competition_data_manager import CompetitionDataManager
from autosklearn.models.holdout_evaluator import HoldoutEvaluator
from autosklearn.models.paramsklearn import get_configuration_space
from ParamSklearn.util import get_dataset
from autosklearn.constants import *
N_TEST_RUNS = 10
class Dummy(object):
pass
class HoldoutEvaluator_Test(unittest.TestCase):
def test_evaluate_multiclass_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_valid = X_test[:25,]
Y_valid = Y_test[:25,]
X_test = X_test[25:,]
Y_test = Y_test[25:,]
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators = ['ridge'],
include_preprocessors = ['select_rates'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
print err[i]
self.assertTrue(np.isfinite(err[i]))
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(np.sum(err > 1))
def test_evaluate_multiclass_classification_all_metrics(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['ridge'],
include_preprocessors=['select_rates'])
# Test all scoring functions
err = []
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration,
all_scoring_functions=True)
if not self._fit(evaluator):
print
continue
err.append(evaluator.predict())
print err[-1]
self.assertIsInstance(err[-1], dict)
for key in err[-1]:
self.assertEqual(len(err[-1]), 5)
self.assertTrue(np.isfinite(err[-1][key]))
self.assertGreaterEqual(err[-1][key], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_evaluate_multilabel_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
Y_train = np.array(convert_to_bin(Y_train, 3))
Y_train[:,-1] = 1
Y_test = np.array(convert_to_bin(Y_test, 3))
Y_test[:, -1] = 1
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'f1_metric', 'task': MULTILABEL_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['random_forest'],
include_preprocessors=['no_preprocessing'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
print err[i]
self.assertTrue(np.isfinite(err[i]))
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_evaluate_binary_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
eliminate_class_two = Y_train != 2
X_train = X_train[eliminate_class_two]
Y_train = Y_train[eliminate_class_two]
eliminate_class_two = Y_test != 2
X_test = X_test[eliminate_class_two]
Y_test = Y_test[eliminate_class_two]
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'auc_metric', 'task': BINARY_CLASSIFICATION,
'is_sparse': False, 'target_num': 2}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['ridge'],
include_preprocessors=['select_rates'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
self.assertTrue(np.isfinite(err[i]))
print err[i]
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_evaluate_regression(self):
X_train, Y_train, X_test, Y_test = get_dataset('boston')
X_valid = X_test[:200, ]
Y_valid = Y_test[:200, ]
X_test = X_test[200:, ]
Y_test = Y_test[200:, ]
D = Dummy()
D.info = {'metric': 'r2_metric', 'task': REGRESSION,
'is_sparse': False, 'target_num': 1}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical',
'numerical', 'numerical', 'numerical', 'numerical',
'numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(D.info,
include_estimators=['random_forest'],
include_preprocessors=['no_preprocessing'])
err = np.zeros([N_TEST_RUNS])
for i in range(N_TEST_RUNS):
print "Evaluate configuration: %d; result:" % i,
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err[i] = evaluator.predict()
self.assertTrue(np.isfinite(err[i]))
print err[i]
self.assertGreaterEqual(err[i], 0.0)
print "Number of times it was worse than random guessing:" + str(
np.sum(err > 1))
def test_with_abalone(self):
dataset = "abalone"
dataset_dir = os.path.join(os.path.dirname(__file__), ".datasets")
D = CompetitionDataManager(dataset, dataset_dir)
configuration_space = get_configuration_space(D.info,
include_estimators=['extra_trees'],
include_preprocessors=['no_preprocessing'])
errors = []
for i in range(N_TEST_RUNS):
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
if not self._fit(evaluator):
print
continue
err = evaluator.predict()
self.assertLess(err, 0.99)
self.assertTrue(np.isfinite(err))
errors.append(err)
# This is a reasonable bound
self.assertEqual(10, len(errors))
self.assertLess(min(errors), 0.77)
def test_5000_classes(self):
weights = ([0.0002] * 4750) + ([0.0001] * 250)
X, Y = sklearn.datasets.make_classification(n_samples=10000,
n_features=20,
n_classes=5000,
n_clusters_per_class=1,
n_informative=15,
n_redundant=5,
n_repeated=0,
weights=weights,
flip_y=0,
class_sep=1.0,
hypercube=True,
shift=None,
scale=1.0,
shuffle=True,
random_state=1)
self.assertEqual(250, np.sum(np.bincount(Y) == 1))
D = Dummy()
D.info = {'metric': 'r2_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 1}
D.data = {'X_train': X, 'Y_train': Y,
'X_valid': X, 'X_test': X}
D.feat_type = ['numerical'] * 5000
configuration_space = get_configuration_space(D.info,
include_estimators=['extra_trees'],
include_preprocessors=['no_preprocessing'])
configuration = configuration_space.sample_configuration()
D_ = copy.deepcopy(D)
evaluator = HoldoutEvaluator(D_, configuration)
evaluator.fit()
def _fit(self, evaluator):
"""Allow us to catch known and valid exceptions for all evaluate
scripts."""
try:
evaluator.fit()
return True
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in e.message or \
"removed all features" in e.message or \
"failed to create intent" in e.message:
pass
else:
traceback.print_tb(sys.exc_info()[2])
raise e
except LinAlgError as e:
if "not positive definite, even with jitter" in e.message:
pass
else:
raise e
except AttributeError as e:
# Some error in QDA
if "log" == e.message:
pass
else:
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.message:
pass
elif "divide by zero encountered in divide" in e.message:
pass
else:
raise e
except UserWarning as e:
if "FastICA did not converge" in e.message:
pass
else:
raise e
def test_file_output(self):
output_dir = os.path.join(os.getcwd(), ".test")
try:
shutil.rmtree(output_dir)
except:
pass
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
D.basename = "test"
configuration_space = get_configuration_space(D.info)
while True:
configuration = configuration_space.sample_configuration()
evaluator = HoldoutEvaluator(D, configuration,
with_predictions=True,
all_scoring_functions=True,
output_dir=output_dir,
output_y_test=True)
if not self._fit(evaluator):
print
continue
evaluator.predict()
evaluator.file_output()
self.assertTrue(os.path.exists(os.path.join(output_dir,
"y_optimization.npy")))
break
def test_predict_proba_binary_classification(self):
X_train, Y_train, X_test, Y_test = get_dataset('iris')
eliminate_class_two = Y_train != 2
X_train = X_train[eliminate_class_two]
Y_train = Y_train[eliminate_class_two]
eliminate_class_two = Y_test != 2
X_test = X_test[eliminate_class_two]
Y_test = Y_test[eliminate_class_two]
X_valid = X_test[:25, ]
Y_valid = Y_test[:25, ]
X_test = X_test[25:, ]
Y_test = Y_test[25:, ]
class Dummy2(object):
def predict_proba(self, y, batch_size=200):
return np.array([[0.1, 0.9], [0.7, 0.3]])
model = Dummy2()
task_type = BINARY_CLASSIFICATION
D = Dummy()
D.info = {'metric': 'bac_metric', 'task': task_type,
'is_sparse': False, 'target_num': 3}
D.data = {'X_train': X_train, 'Y_train': Y_train,
'X_valid': X_valid, 'X_test': X_test}
D.feat_type = ['numerical', 'Numerical', 'numerical', 'numerical']
configuration_space = get_configuration_space(
D.info, include_estimators=['ridge'],
include_preprocessors=['select_rates'])
configuration = configuration_space.sample_configuration()
evaluator = HoldoutEvaluator(D, configuration)
pred = evaluator.predict_proba(None, model, task_type)
expected = [[0.9], [0.3]]
for i in range(len(expected)):
self.assertEqual(expected[i], pred[i])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_evaluate']
unittest.main()
|
|
# Test case for the os.poll() function
import os
import random
import select
import _testcapi
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.test_support import TESTFN, run_unittest, reap_threads
try:
select.poll
except AttributeError:
raise unittest.SkipTest, "select.poll not defined -- skipping test_poll"
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
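# Hedged illustration (not part of the original test): find_ready_matching()
# keeps only the fds whose event mask contains the requested flag.
def _demo_find_ready_matching():
    ready = [(3, select.POLLIN), (4, select.POLLOUT),
             (5, select.POLLIN | select.POLLOUT)]
    assert find_ready_matching(ready, select.POLLOUT) == [4, 5]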
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = " This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError, "no pipes ready for writing"
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError, "no pipes ready for reading"
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
            os.close(r2w[rd])
            os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
        pollster.register(p, select.POLLIN)
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1L << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
pollster = select.poll()
# Issue 15989
self.assertRaises(OverflowError, pollster.register, 0,
_testcapi.SHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.register, 0,
_testcapi.USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
|
|
import inspect
import functools
import asyncio
import typing as t
import contextlib
import types
from .module import Module
from .util import isasync
FuncType = t.Callable[..., t.Any]
Decorator = t.Callable[[FuncType], FuncType]
UNSET = inspect.Parameter.empty
class NoProvider(RuntimeError):
pass
class Injector(object):
# modules: t.List[Module]
# providers: SyncProviderMap
# async_providers: AsyncProviderMap
def __init__(
self,
        _sync_dep_klass: t.Optional[t.Type["Dependencies"]] = None,
        _async_dep_klass: t.Optional[t.Type["AsyncDependencies"]] = None,
):
self.modules = []
self.providers = {}
self.async_providers = {}
self._sync_dep = _sync_dep_klass or Dependencies
self._async_dep = _async_dep_klass or AsyncDependencies
def load(self, *modules: Module):
"""Load the given modules in the provided order.
Any providers in the modules will take precedence over
any already loaded providers.
"""
for module in modules:
module.load(self)
self._load_module(module)
def unload(self, *modules: Module) -> None:
"""Unload the given modules.
If the module is not loaded, nothing will happen.
        Any providers that have been superseded by providers in the
unloaded module will be reinstated.
"""
keep = self.modules[:]
self.modules = []
self.providers = {}
self.async_providers = {}
for m in keep:
if m in modules:
m.unload(self)
continue
self._load_module(m)
def _load_module(self, module: Module) -> None:
self.modules.append(module)
for feature, provider in module.providers.items():
self.providers[feature] = (module, provider)
for feature, provider in module.async_providers.items():
self.async_providers[feature] = (module, provider)
def wrap_dependent(self, func: FuncType) -> FuncType:
"""Wrap a function to have it's dependencies injected.
The returning function will have a `__dependencies__` attribute
used to manage dependencies and parameters for the wrapped function.
Note: This does not specify which dependencies to inject.
"""
if hasattr(func, "__dependencies__"):
return func
if isasync(func):
klass = self._async_dep
else:
klass = self._sync_dep
injected = klass(self, func)
@functools.wraps(func)
def wrapped(*args, **kwargs):
return injected.call_injected(*args, **kwargs)
wrapped.__dependencies__ = injected
return wrapped
def __call__(self, func: FuncType) -> FuncType:
"""Wrap a function and attempt to discover it's dependencies by
inspecting the annotations on kwarg-only arguments.
>>>
>>> @injector
>>> def my_func(*, a_frob: Frob):
>>> assert isinstance(a_frob, Frob)
>>>
"""
func = self.wrap_dependent(func)
func.__dependencies__.inspect_dependencies()
return func
def inject(self, **mapping) -> Decorator:
"""Wrap a function and specify which dependencies to inject on which
kwargs.
>>>
        >>> @injector.inject(a_frob=Frob)
>>> def my_func(a_frob):
>>> assert isinstance(a_frob, Frob)
>>>
"""
def wrapper(func: FuncType) -> FuncType:
func = self.wrap_dependent(func)
for kwarg, feature in mapping.items():
func.__dependencies__.add_dependency(kwarg, feature)
return func
return wrapper
def param(self, kwarg, __feature=None, **params) -> Decorator:
"""Specify parameters to pass to the dependencies provider.
>>>
>>> @injector
>>> @injector.param('a_frob', frobulation='high')
>>> def my_func(a_frob: Frob):
>>> assert a_frob.frobulation == 'high'
>>>
You can also specify the dependency type as an optional second
argument.
>>>
>>> @injector.param('a_frob', Frob, frobulation='high')
>>> def my_func(a_frob):
>>> assert a_frob.frobulation == 'high'
>>>
"""
def wrapper(func: FuncType) -> FuncType:
func = self.wrap_dependent(func)
if __feature:
func.__dependencies__.add_dependency(kwarg, __feature)
func.__dependencies__.add_params(kwarg, params)
return func
return wrapper
def _get(self, feature, params=None, default=UNSET):
"""Get the resolved dependency for `feature`."""
params = params or {}
provider_map = self.providers
if feature not in provider_map:
if default is UNSET:
raise NoProvider("No provider for {!r}".format(feature))
else:
return default, False
module, provider = provider_map[feature]
return (
provider(module, **params),
getattr(provider, "__contextprovider__", False),
)
def get(self, feature, params=None, default=UNSET):
dep, _ = self._get(feature, params, default)
return dep
def _get_async(self, feature, params=None):
"""Get the resolved async dependency for `feature`."""
provider_map = self.async_providers
if feature not in provider_map:
raise NoProvider("No provider for {!r}".format(feature))
module, provider = provider_map[feature]
return (
provider(module, **params),
getattr(provider, "__contextprovider__", False),
)
def get_async(self, feature, params=None):
dep, _ = self._get_async(feature, params)
return dep
def _parameter_injectable(parameter: inspect.Parameter):
return parameter.kind == inspect.Parameter.KEYWORD_ONLY
class Dependencies(object):
"""Container class to manage dependencies for an injected function.
"""
def __init__(self, injector: Injector, func: FuncType) -> None:
functools.update_wrapper(self, func)
self.injector = injector
self.func = func
self.signature = inspect.signature(func)
self.dependency_params = {}
self.dependencies = {}
self.defaults = {
kwarg: param.default for kwarg, param in self.signature.parameters.items()
}
def __repr__(self):
params = ", ".join(["{}={!r}".format(k, v) for k, v in self.dependency_params])
return "<injected {self.func.__name__} ({params})>".format(
self=self, params=params
)
def add_dependency(self, kwarg: str, feature) -> None:
if kwarg in self.dependencies:
raise RuntimeError("Dependency for kwarg {!r} exists".format(kwarg))
self.dependencies[kwarg] = feature
def add_params(self, kwarg, params):
self.dependency_params.setdefault(kwarg, {}).update(params)
def inspect_dependencies(self):
for kwarg, parameter in self.signature.parameters.items():
if (
not _parameter_injectable(parameter)
or parameter.annotation == inspect.Parameter.empty
):
continue
self.dependencies[kwarg] = parameter.annotation
def resolve_dependencies(self, called_kwargs, stack):
output = {}
for kwarg, feature in self.dependencies.items():
if kwarg in called_kwargs:
# Dependency already provided explicitly
continue
params = self.dependency_params.get(kwarg, {})
default = self.defaults.get(kwarg, UNSET)
dep, isctx = self.injector._get(feature, params=params, default=default)
if isctx:
dep = stack.enter_context(dep)
output[kwarg] = dep
return output
def call_injected(self, *args, **kwargs) -> t.Any:
with contextlib.ExitStack() as stack:
kwargs.update(self.resolve_dependencies(kwargs, stack))
return self.func(*args, **kwargs)
class AsyncDependencies(Dependencies):
"""Container class to manage dependencies for an injected async function.
"""
async def resolve_dependencies(self, called_kwargs, stack):
output = {}
futures = {}
for kwarg, feature in self.dependencies.items():
if kwarg in called_kwargs:
continue
params = self.dependency_params.get(kwarg, {})
try:
dep, isctx = self.injector._get_async(feature, params)
if isctx:
dep = stack.enter_async_context(dep)
futures[kwarg] = dep
except NoProvider:
dep, isctx = self.injector._get(
feature, params=params, default=self.defaults.get(kwarg, UNSET)
)
if isctx:
dep = stack.enter_context(dep)
output[kwarg] = dep
for k, v in futures.items():
output[k] = await v
return output
def call_injected(self, *args, **kwargs) -> t.Any:
if not asyncio.iscoroutinefunction(self.func):
            # We can assume that a non-coroutine function is actually an async generator
return self._yield_injected(*args, **kwargs)
else:
return self._return_injected(*args, **kwargs)
async def _return_injected(self, *args, **kwargs) -> t.Any:
async with contextlib.AsyncExitStack() as stack:
kwargs.update(await self.resolve_dependencies(kwargs, stack))
return await self.func(*args, **kwargs)
async def _yield_injected(self, *args, **kwargs) -> t.Any:
async with contextlib.AsyncExitStack() as stack:
kwargs.update(await self.resolve_dependencies(kwargs, stack))
async for x in self.func(*args, **kwargs):
yield x
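# Hedged usage sketch (not part of this module's public API): providers are
# normally registered through Module.load(), but seeding Injector.providers
# directly with the same (module, provider) tuples keeps the example
# self-contained.
def _demo_injector_usage():
    injector = Injector()
    # provider callables receive the owning module first; None is fine here
    injector.providers["greeting"] = (None, lambda module, **params: "hello")
    @injector.inject(word="greeting")
    def shout(*, word):
        return word.upper()
    assert shout() == "HELLO"
    return shout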
|
|
# Copyright (C) 2015 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
from phonopy.structure.cells import print_cell
def show_general_settings(settings,
run_mode,
phono3py,
cell_filename,
input_filename,
output_filename):
is_primitive_axes_auto = (type(phono3py.primitive_matrix) is str and
phono3py.primitive_matrix == 'auto')
primitive_matrix = phono3py.primitive_matrix
supercell_matrix = phono3py.supercell_matrix
phonon_supercell_matrix = phono3py.phonon_supercell_matrix
print("-" * 29 + " General settings " + "-" * 29)
print("Run mode: %s" % run_mode)
if output_filename:
print("Output filename is modified by %s." % output_filename)
if input_filename:
print("Input filename is modified by %s." % input_filename)
if settings.hdf5_compression:
print("HDF5 data compression filter: %s" % settings.hdf5_compression)
if phono3py.calculator:
print("Calculator interface: %s" % phono3py.calculator)
print("Crystal structure was read from \"%s\"." % cell_filename)
if (np.diag(np.diag(supercell_matrix)) - supercell_matrix).any():
print("Supercell matrix (dim):")
for v in supercell_matrix:
print(" %s" % v)
else:
print("Supercell (dim): %s" % np.diag(supercell_matrix))
if phonon_supercell_matrix is not None:
if (np.diag(np.diag(phonon_supercell_matrix))
- phonon_supercell_matrix).any():
print("Phonon supercell matrix (dim-fc2):")
for v in phonon_supercell_matrix:
print(" %s" % v)
else:
print("Phonon supercell (dim-fc2): %s"
% np.diag(phonon_supercell_matrix))
if is_primitive_axes_auto:
print("Primitive matrix (Auto):")
for v in primitive_matrix:
print(" %s" % v)
elif primitive_matrix is not None:
print("Primitive matrix:")
for v in primitive_matrix:
print(" %s" % v)
def show_phono3py_cells(phono3py, settings):
symmetry = phono3py.symmetry
primitive = phono3py.primitive
supercell = phono3py.supercell
phonon_primitive = phono3py.phonon_primitive
phonon_supercell = phono3py.phonon_supercell
print("Spacegroup: %s" % symmetry.get_international_table())
print("-" * 30 + " primitive cell " + "-" * 30)
print_cell(primitive)
print("-" * 32 + " supercell " + "-" * 33)
print_cell(supercell, mapping=primitive.s2p_map)
if settings.phonon_supercell_matrix is not None:
print("-" * 19 + " primitive cell for harmonic phonon " + "-" * 20)
print_cell(phonon_primitive)
print("-" * 21 + " supercell for harmonic phonon " + "-" * 22)
print_cell(phonon_supercell, mapping=phonon_primitive.s2p_map)
print("-" * 76)
def show_phono3py_force_constants_settings(settings):
read_fc3 = settings.read_fc3
read_fc2 = settings.read_fc2
symmetrize_fc3r = (settings.is_symmetrize_fc3_r or
settings.fc_symmetry)
symmetrize_fc3q = settings.is_symmetrize_fc3_q
symmetrize_fc2 = (settings.is_symmetrize_fc2 or
settings.fc_symmetry)
print("-" * 29 + " Force constants " + "-" * 30)
if settings.fc_calculator == 'alm' and not read_fc2:
print("Use ALM for getting fc2 (simultaneous fit to fc2 and fc3)")
else:
print("Imposing translational and index exchange symmetry to fc2: %s" %
symmetrize_fc2)
if settings.is_isotope or settings.is_joint_dos:
pass
elif settings.fc_calculator == 'alm' and not read_fc3:
print("Use ALM for getting fc3")
else:
print("Imposing translational and index exchange symmetry to fc3: "
"%s" % symmetrize_fc3r)
print(("Imposing symmetry of index exchange to fc3 in reciprocal "
"space: %s") % symmetrize_fc3q)
if settings.cutoff_fc3_distance is not None:
print("FC3 cutoff distance: %s" % settings.cutoff_fc3_distance)
def show_phono3py_settings(phono3py, settings, updated_settings, log_level):
sigmas = updated_settings['sigmas']
temperatures = updated_settings['temperatures']
temperature_points = updated_settings['temperature_points']
grid_points = updated_settings['grid_points']
cutoff_frequency = updated_settings['cutoff_frequency']
frequency_factor_to_THz = updated_settings['frequency_factor_to_THz']
frequency_scale_factor = updated_settings['frequency_scale_factor']
frequency_step = updated_settings['frequency_step']
num_frequency_points = updated_settings['num_frequency_points']
print("-" * 27 + " Calculation settings " + "-" * 27)
if settings.is_nac:
print("Non-analytical term correction (NAC): %s" % settings.is_nac)
if phono3py.nac_params:
print("NAC unit conversion factor: %9.5f"
% phono3py.nac_params['factor'])
if settings.nac_q_direction is not None:
print("NAC q-direction: %s" % settings.nac_q_direction)
if phono3py.mesh_numbers is not None:
print("Mesh sampling: [ %d %d %d ]" % tuple(phono3py.mesh_numbers))
if settings.mesh_divisors is not None and settings.is_bterta:
print("Mesh divisors: [ %d %d %d ]" % tuple(settings.mesh_divisors))
if settings.band_indices is not None and not settings.is_bterta:
print(("Band indices: [" + " %s" * len(settings.band_indices) + " ]") %
tuple([np.array(bi) + 1 for bi in settings.band_indices]))
if sigmas:
text = "BZ integration: "
for i, sigma in enumerate(sigmas):
if sigma:
text += "Smearing=%s" % sigma
if settings.sigma_cutoff_width is not None:
text += "(%4.2f SD)" % settings.sigma_cutoff_width
else:
text += "Tetrahedron-method"
if i < len(sigmas) - 1:
text += ", "
print(text)
if settings.is_lbte and settings.read_collision is not None:
pass
elif (settings.is_real_self_energy or
settings.is_imag_self_energy or
settings.is_spectral_function or
settings.is_bterta):
if len(temperatures) > 5:
text = (" %.1f " * 5 + "...") % tuple(temperatures[:5])
text += " %.1f" % temperatures[-1]
else:
text = (" %.1f " * len(temperatures)) % tuple(temperatures)
print("Temperature: " + text)
elif temperature_points is not None:
print(("Temperatures:" + " %.1f " * len(temperature_points))
% tuple(temperature_points))
if settings.scattering_event_class is not None:
print("Scattering event class: %s"
% settings.scattering_event_class)
if grid_points is not None:
text = "Grid point to be calculated: "
if len(grid_points) > 8:
for i, gp in enumerate(grid_points):
if i % 10 == 0:
text += "\n"
text += " "
text += "%d " % gp
else:
for gp in grid_points:
text += "%d " % gp
print(text)
if cutoff_frequency:
print("Cutoff frequency: %s" % cutoff_frequency)
if (settings.use_ave_pp and
(settings.is_bterta or settings.is_lbte)):
print("Use averaged ph-ph interaction")
const_ave_pp = settings.constant_averaged_pp_interaction
if (const_ave_pp is not None and
(settings.is_bterta or settings.is_lbte)):
print("Constant ph-ph interaction: %6.3e" % const_ave_pp)
print("Frequency conversion factor to THz: %9.5f" %
frequency_factor_to_THz)
if frequency_scale_factor is not None:
print("Frequency scale factor: %8.5f" % frequency_scale_factor)
if (settings.is_joint_dos or
settings.is_imag_self_energy or
settings.is_real_self_energy or
settings.is_spectral_function):
if frequency_step is not None:
print("Frequency step for spectrum: %s" % frequency_step)
if num_frequency_points is not None:
print("Number of frequency sampling points: %d" %
num_frequency_points)
sys.stdout.flush()
|
|
"""
playa.ext.audio.index
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import hashlib
import os.path
import re
import time
import threading
from collections import defaultdict
from mutagen.easymp4 import EasyMP4
from mutagen.mp3 import EasyMP3
from playa.common.storage import load, save
def get_metadata(full_path):
metadata = {
'filename': os.path.basename(full_path)[:-4],
}
if full_path.endswith('mp4') or full_path.endswith('m4a'):
id3_cls = EasyMP4
elif full_path.endswith('mp3'):
id3_cls = EasyMP3
else:
id3_cls = None
if id3_cls:
try:
audio = id3_cls(full_path)
except Exception, e:
print e
audio = None
if audio:
for key in ('artist', 'title', 'album', 'genre'):
try:
value = unicode(audio[key][0])
except (IndexError, KeyError):
continue
metadata[key] = value
metadata['length'] = audio.info.length
return metadata
def get_key(full_path):
metadata = get_metadata(full_path)
key = tuple([metadata.get(k) for k in ('artist', 'title', 'album')])
if not any(key):
key = hashlib.md5(full_path).hexdigest()
return key
class AudioIndex(threading.Thread):
RE_SEARCH_TOKENS = re.compile(r'\b([^\:]+):("[^"]*"|[^\s]*)')
def __init__(self, app, filter_keys, text_keys):
super(AudioIndex, self).__init__()
self.app = app
self.filter_keys = filter_keys
self.text_keys = text_keys
self.tokenized = defaultdict(lambda:defaultdict(int))
self.filters = defaultdict(lambda:defaultdict(list))
self.filters_ci = defaultdict(lambda:defaultdict(list))
self.metadata = defaultdict(dict)
self.files = {}
self._data_file = os.path.join(self.app.config['DATA_PATH'], 'index.db')
self._ready = False
def __len__(self):
return len(self.files)
def run(self):
if os.path.exists(self._data_file):
self.load()
self._ready = True
while True:
start = time.time()
print "Building audio index"
prev = self.files.keys()
            # iterate over a copy so stale entries can be dropped while scanning
            for key, full_path in self.files.items():
                if not os.path.exists(full_path):
                    del self.files[key]
for path in self.app.config['AUDIO_PATHS']:
self.add_path(path)
print "Done! (%d entries, took %.2fs)" % (len(self), time.time() - start)
self._ready = True
if self.files.keys() != prev:
self.save()
time.sleep(3)
def load(self):
results = load(self._data_file)
if not results:
return
for k, v in results.iteritems():
if k == 'files' and not isinstance(v, dict):
continue
if isinstance(v, dict):
getattr(self, k).update(v)
else:
setattr(self, k, v)
def save(self):
save(self._data_file, {
'tokenized': dict(self.tokenized),
'filters': dict(self.filters),
'filters_ci': dict(self.filters_ci),
'metadata': dict(self.metadata),
'files': self.files,
})
def add_path(self, path):
dir_list = [path]
while dir_list:
path = dir_list.pop()
for fn in os.listdir(path):
if fn.startswith('.'):
continue
full_path = os.path.join(path, fn)
try:
unicode(full_path)
except:
continue
if os.path.isdir(full_path):
dir_list.append(full_path)
continue
elif full_path in self.files.values():
continue
try:
metadata = get_metadata(full_path)
except Exception, e:
print e
continue
tokens = []
for key, value in metadata.iteritems():
if key in self.text_keys:
tokens.extend(filter(None, value.lower().split(' ')))
if key in self.filter_keys:
self.filters[key][value].append(full_path)
self.filters_ci[key][value.lower()].append(full_path)
self.metadata[full_path] = metadata
self.files[get_key(full_path)] = full_path
for token in tokens:
self.tokenized[token][full_path] += 1
def search(self, query):
text_results = defaultdict(int)
filter_results = defaultdict(int)
tokens = self._get_search_query_tokens(query.lower())
text_tokens = tokens.pop('', None)
for token, value in tokens.iteritems():
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if value in self.filters_ci[token]:
for full_path in self.filters_ci[token][value]:
filter_results[full_path] += 1
if tokens and not filter_results:
return None
if text_tokens:
for token in text_tokens.split(' '):
for full_path, count in self.tokenized[token].iteritems():
text_results[full_path] += count
if filter_results:
            # We need to remove any results which didn't match filters
results = {}
for full_path, count in filter_results.iteritems():
if not text_tokens or text_results[full_path]:
results[full_path] = text_results[full_path] + count
else:
results = text_results
if not results:
return None
return sorted(results.items(), key=lambda x: -x[1])[:50]
def _get_search_query_tokens(self, query):
"""
Parses a search query for supported tokens and returns a dictionary.
e.g., "author:test my message" -> {'author': 'test', '': 'my message'}
"""
tokens = defaultdict(str)
def _token_repl(matchobj):
tokens[matchobj.group(1)] = matchobj.group(2)
return ''
query = self.RE_SEARCH_TOKENS.sub(_token_repl, query.strip()).strip()
if query:
tokens[''] = query
return tokens
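# Hedged illustration (not part of playa): how RE_SEARCH_TOKENS splits a query
# into "field:value" filters, mirroring _get_search_query_tokens() above.
def _demo_search_tokens():
    query = 'artist:"Daft Punk" genre:house around the world'
    found = dict(AudioIndex.RE_SEARCH_TOKENS.findall(query))
    # found == {'artist': '"Daft Punk"', 'genre': 'house'}
    return found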
|
|
#!/usr/bin/env python
# coding=utf-8
import random,os,sys,unittest,run_app,codecs
reload(sys)
sys.setdefaultencoding( "utf-8" )
class TestCaseUnit(unittest.TestCase):
def test_positive_cmd1(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd1-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd1-positive"))
def test_positive_cmd10(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd10-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd10-positive"))
def test_positive_cmd100(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd100-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd100-positive"))
def test_positive_cmd101(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd101-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd101-positive"))
def test_positive_cmd102(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd102-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd102-positive"))
def test_positive_cmd103(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd103-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd103-positive"))
def test_positive_cmd104(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd104-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd104-positive"))
def test_positive_cmd105(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd105-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd105-positive"))
def test_positive_cmd106(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd106-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd106-positive"))
def test_positive_cmd107(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd107-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd107-positive"))
def test_positive_cmd108(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd108-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd108-positive"))
def test_positive_cmd109(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd109-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd109-positive"))
def test_positive_cmd11(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd11-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd11-positive"))
def test_positive_cmd110(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd110-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd110-positive"))
def test_positive_cmd111(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd111-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd111-positive"))
def test_positive_cmd112(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd112-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd112-positive"))
def test_positive_cmd113(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd113-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd113-positive"))
def test_positive_cmd114(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd114-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd114-positive"))
def test_positive_cmd115(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd115-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd115-positive"))
def test_positive_cmd116(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd116-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd116-positive"))
def test_positive_cmd117(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd117-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd117-positive"))
def test_positive_cmd118(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd118-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd118-positive"))
def test_positive_cmd119(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd119-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd119-positive"))
def test_positive_cmd12(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd12-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd12-positive"))
def test_positive_cmd120(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd120-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd120-positive"))
def test_positive_cmd121(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd121-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd121-positive"))
def test_positive_cmd122(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd122-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd122-positive"))
def test_positive_cmd123(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd123-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd123-positive"))
def test_positive_cmd124(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd124-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd124-positive"))
def test_positive_cmd125(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd125-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd125-positive"))
def test_positive_cmd126(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd126-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd126-positive"))
def test_positive_cmd127(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd127-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd127-positive"))
def test_positive_cmd128(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd128-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd128-positive"))
def test_positive_cmd129(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd129-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd129-positive"))
def test_positive_cmd13(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd13-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd13-positive"))
def test_positive_cmd130(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd130-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd130-positive"))
def test_positive_cmd131(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd131-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd131-positive"))
def test_positive_cmd132(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd132-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd132-positive"))
def test_positive_cmd133(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd133-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd133-positive"))
def test_positive_cmd134(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd134-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd134-positive"))
def test_positive_cmd135(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd135-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd135-positive"))
def test_positive_cmd136(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd136-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd136-positive"))
def test_positive_cmd137(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd137-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd137-positive"))
def test_positive_cmd138(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd138-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd138-positive"))
def test_positive_cmd139(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd139-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd139-positive"))
def test_positive_cmd14(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd14-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd14-positive"))
def test_positive_cmd140(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd140-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd140-positive"))
def test_positive_cmd141(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd141-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd141-positive"))
def test_positive_cmd142(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd142-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd142-positive"))
def test_positive_cmd143(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd143-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd143-positive"))
def test_positive_cmd144(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd144-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd144-positive"))
def test_positive_cmd145(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd145-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd145-positive"))
def test_positive_cmd146(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd146-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd146-positive"))
def test_positive_cmd147(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd147-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd147-positive"))
def test_positive_cmd148(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd148-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd148-positive"))
def test_positive_cmd149(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd149-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd149-positive"))
def test_positive_cmd15(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd15-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd15-positive"))
def test_positive_cmd150(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd150-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd150-positive"))
def test_positive_cmd151(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd151-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd151-positive"))
def test_positive_cmd152(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd152-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd152-positive"))
def test_positive_cmd153(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd153-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd153-positive"))
def test_positive_cmd154(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd154-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd154-positive"))
def test_positive_cmd155(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd155-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd155-positive"))
def test_positive_cmd156(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd156-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd156-positive"))
def test_positive_cmd157(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd157-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd157-positive"))
def test_positive_cmd158(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd158-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd158-positive"))
def test_positive_cmd159(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd159-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd159-positive"))
def test_positive_cmd16(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd16-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd16-positive"))
def test_positive_cmd160(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd160-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd160-positive"))
def test_negative_cmd161(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd161-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd161-negative"))
def test_negative_cmd162(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd162-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd162-negative"))
def test_negative_cmd163(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd163-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd163-negative"))
def test_negative_cmd164(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd164-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd164-negative"))
def test_negative_cmd165(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd165-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd165-negative"))
def test_negative_cmd166(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd166-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd166-negative"))
def test_negative_cmd167(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd167-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd167-negative"))
def test_negative_cmd168(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd168-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd168-negative"))
def test_negative_cmd169(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd169-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd169-negative"))
def test_positive_cmd17(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd17-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd17-positive"))
def test_negative_cmd170(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd170-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd170-negative"))
def test_negative_cmd171(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd171-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd171-negative"))
def test_negative_cmd172(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd172-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd172-negative"))
def test_negative_cmd173(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd173-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd173-negative"))
def test_negative_cmd174(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd174-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd174-negative"))
def test_negative_cmd175(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd175-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd175-negative"))
def test_negative_cmd176(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd176-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd176-negative"))
def test_negative_cmd177(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd177-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd177-negative"))
def test_negative_cmd178(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd178-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd178-negative"))
def test_negative_cmd179(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd179-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd179-negative"))
def test_positive_cmd18(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd18-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd18-positive"))
def test_negative_cmd180(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd180-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd180-negative"))
def test_negative_cmd181(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd181-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd181-negative"))
def test_negative_cmd182(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd182-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd182-negative"))
def test_negative_cmd183(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd183-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd183-negative"))
def test_negative_cmd184(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd184-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd184-negative"))
def test_negative_cmd185(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd185-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd185-negative"))
def test_negative_cmd186(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd186-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd186-negative"))
def test_negative_cmd187(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd187-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd187-negative"))
def test_negative_cmd188(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd188-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd188-negative"))
def test_negative_cmd189(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd189-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd189-negative"))
def test_positive_cmd19(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd19-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd19-positive"))
def test_negative_cmd190(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd190-negative", "/opt/wrt-packertool-android-tests/apks/arm/cmd190-negative"))
def test_positive_cmd2(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd2-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd2-positive"))
def test_positive_cmd20(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd20-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd20-positive"))
def test_positive_cmd21(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd21-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd21-positive"))
def test_positive_cmd22(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd22-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd22-positive"))
def test_positive_cmd23(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd23-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd23-positive"))
def test_positive_cmd24(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd24-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd24-positive"))
def test_positive_cmd25(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd25-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd25-positive"))
def test_positive_cmd26(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd26-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd26-positive"))
def test_positive_cmd27(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd27-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd27-positive"))
def test_positive_cmd28(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd28-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd28-positive"))
def test_positive_cmd29(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd29-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd29-positive"))
def test_positive_cmd3(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd3-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd3-positive"))
def test_positive_cmd30(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd30-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd30-positive"))
def test_positive_cmd31(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd31-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd31-positive"))
def test_positive_cmd32(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd32-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd32-positive"))
def test_positive_cmd33(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd33-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd33-positive"))
def test_positive_cmd34(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd34-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd34-positive"))
def test_positive_cmd35(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd35-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd35-positive"))
def test_positive_cmd36(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd36-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd36-positive"))
def test_positive_cmd37(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd37-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd37-positive"))
def test_positive_cmd38(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd38-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd38-positive"))
def test_positive_cmd39(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd39-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd39-positive"))
def test_positive_cmd4(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd4-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd4-positive"))
def test_positive_cmd40(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd40-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd40-positive"))
def test_positive_cmd41(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd41-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd41-positive"))
def test_positive_cmd42(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd42-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd42-positive"))
def test_positive_cmd43(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd43-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd43-positive"))
def test_positive_cmd44(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd44-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd44-positive"))
def test_positive_cmd45(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd45-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd45-positive"))
def test_positive_cmd46(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd46-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd46-positive"))
def test_positive_cmd47(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd47-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd47-positive"))
def test_positive_cmd48(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd48-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd48-positive"))
def test_positive_cmd49(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd49-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd49-positive"))
def test_positive_cmd5(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd5-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd5-positive"))
def test_positive_cmd50(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd50-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd50-positive"))
def test_positive_cmd51(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd51-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd51-positive"))
def test_positive_cmd52(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd52-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd52-positive"))
def test_positive_cmd53(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd53-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd53-positive"))
def test_positive_cmd54(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd54-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd54-positive"))
def test_positive_cmd55(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd55-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd55-positive"))
def test_positive_cmd56(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd56-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd56-positive"))
def test_positive_cmd57(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd57-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd57-positive"))
def test_positive_cmd58(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd58-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd58-positive"))
def test_positive_cmd59(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd59-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd59-positive"))
def test_positive_cmd6(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd6-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd6-positive"))
def test_positive_cmd60(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd60-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd60-positive"))
def test_positive_cmd61(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd61-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd61-positive"))
def test_positive_cmd62(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd62-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd62-positive"))
def test_positive_cmd63(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd63-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd63-positive"))
def test_positive_cmd64(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd64-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd64-positive"))
def test_positive_cmd65(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd65-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd65-positive"))
def test_positive_cmd66(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd66-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd66-positive"))
def test_positive_cmd67(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd67-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd67-positive"))
def test_positive_cmd68(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd68-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd68-positive"))
def test_positive_cmd69(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd69-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd69-positive"))
def test_positive_cmd7(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd7-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd7-positive"))
def test_positive_cmd70(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd70-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd70-positive"))
def test_positive_cmd71(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd71-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd71-positive"))
def test_positive_cmd72(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd72-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd72-positive"))
def test_positive_cmd73(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd73-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd73-positive"))
def test_positive_cmd74(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd74-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd74-positive"))
def test_positive_cmd75(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd75-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd75-positive"))
def test_positive_cmd76(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd76-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd76-positive"))
def test_positive_cmd77(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd77-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd77-positive"))
def test_positive_cmd78(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd78-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd78-positive"))
def test_positive_cmd79(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd79-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd79-positive"))
def test_positive_cmd8(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd8-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd8-positive"))
def test_positive_cmd80(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd80-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd80-positive"))
def test_positive_cmd81(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd81-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd81-positive"))
def test_positive_cmd82(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd82-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd82-positive"))
def test_positive_cmd83(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd83-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd83-positive"))
def test_positive_cmd84(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd84-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd84-positive"))
def test_positive_cmd85(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd85-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd85-positive"))
def test_positive_cmd86(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd86-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd86-positive"))
def test_positive_cmd87(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd87-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd87-positive"))
def test_positive_cmd88(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd88-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd88-positive"))
def test_positive_cmd89(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd89-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd89-positive"))
def test_positive_cmd9(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd9-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd9-positive"))
def test_positive_cmd90(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd90-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd90-positive"))
def test_positive_cmd91(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd91-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd91-positive"))
def test_positive_cmd92(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd92-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd92-positive"))
def test_positive_cmd93(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd93-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd93-positive"))
def test_positive_cmd94(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd94-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd94-positive"))
def test_positive_cmd95(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd95-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd95-positive"))
def test_positive_cmd96(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd96-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd96-positive"))
def test_positive_cmd97(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd97-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd97-positive"))
def test_positive_cmd98(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd98-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd98-positive"))
def test_positive_cmd99(self):
self.assertEqual("PASS", run_app.tryRunApp("cmd99-positive", "/opt/wrt-packertool-android-tests/apks/arm/cmd99-positive"))
if __name__ == '__main__':
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'nova.version': <RPC Envelope Version as a String>,
'nova.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'nova.version'
_MESSAGE_KEY = 'nova.message'
# TODO(russellb) Turn this on after Grizzly.
_SEND_RPC_ENVELOPE = False
class RPCException(Exception):
message = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.message % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.message
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
message = _("Timeout while waiting on RPC response.")
class InvalidRPCConnectionReuse(RPCException):
message = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
message = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
message = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
It will ensure that any resources associated with the connection, such
as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = {'set_admin_password': [('args', 'new_pass')],
'run_instance': [('args', 'admin_password')],
'route_message': [('args', 'message', 'args', 'method_info',
'method_kwargs', 'password'),
('args', 'message', 'args', 'method_info',
'method_kwargs', 'admin_password')]}
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
has_context_token = '_context_auth_token' in msg_data
has_token = 'auth_token' in msg_data
if not any([has_method, has_context_token, has_token]):
return log_func(msg, msg_data)
msg_data = copy.deepcopy(msg_data)
if has_method:
for arg in SANITIZE.get(msg_data['method'], []):
try:
d = msg_data
for elem in arg[:-1]:
d = d[elem]
d[arg[-1]] = '<SANITIZED>'
except KeyError, e:
LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
{'item': arg,
'err': e})
if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
if has_token:
msg_data['auth_token'] = '<SANITIZED>'
return log_func(msg, msg_data)
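# Illustrative example (added comment, not in the original module): given
#   msg_data = {'method': 'run_instance',
#               'args': {'admin_password': 'secret'}}
# a call such as _safe_log(LOG.debug, 'sending: %s', msg_data) deep-copies
# msg_data and replaces the password value with '<SANITIZED>' before invoking
# LOG.debug, so credentials never reach the logs.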
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"), unicode(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
data = {
'class': str(failure.__class__.__name__),
'module': str(failure.__class__.__module__),
'message': unicode(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(**failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
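# Added note for illustration: if the remote side raised, say,
# nova.exception.InstanceNotFound and 'nova.exception' appears in
# conf.allowed_rpc_exception_modules, the caller gets back an instance of a
# dynamically created InstanceNotFound_Remote subclass whose str() includes
# the remote traceback; any exception from a module not on that whitelist is
# returned as a generic RemoteError instead.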
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""This encapsulates some actual exception that is expected to be
hit by an RPC proxy object. Merely instantiating it records the
current exception information, which will be passed back to the
RPC client without exceptional logging."""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception, e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer."""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
version_parts = version.split('.')
imp_version_parts = imp_version.split('.')
if int(version_parts[0]) != int(imp_version_parts[0]): # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
return False
return True
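# Hedged examples of the compatibility rule above (added for illustration):
#
#   version_is_compatible('1.3', '1.1')  # True:  same major, lower minor
#   version_is_compatible('1.3', '1.4')  # False: requested minor too new
#   version_is_compatible('2.0', '1.3')  # False: major versions differ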
def serialize_msg(raw_msg, force_envelope=False):
if not _SEND_RPC_ENVELOPE and not force_envelope:
return raw_msg
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
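# Illustrative round trip (added comment; force_envelope=True is used so the
# 2.0 envelope is produced even while _SEND_RPC_ENVELOPE is False):
#
#   raw = {'method': 'ping', 'args': {}}
#   envelope = serialize_msg(raw, force_envelope=True)
#   # envelope carries {'nova.version': '2.0', 'nova.message': <JSON payload>}
#   assert deserialize_msg(envelope) == raw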
|
|
"""Wrapper to call Phabricator's Maniphest Conduit API."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlcon_maniphest
#
# Public Functions:
# create_task
# update_task
# query
#
# Public Assignments:
# PRIORITIES
# PRIORITY_DESCRIPTIONS
# PRIORITY_DESCRIPTIONS_TO_VALUES
# STATUSES
# STATUS_FILTERS
# ORDERS
# CreateTaskResponse
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import phlsys_namedtuple
# Enumerate the priorities that a Maniphest task may have
# from ManiphestTaskPriority.php
PRIORITIES = {
'unbreak_now': 100,
'triage': 90,
'high': 80,
'normal': 50,
'low': 25,
'wish': 0,
}
# from ManiphestTaskPriority.php
PRIORITY_DESCRIPTIONS = {
PRIORITIES['unbreak_now']: 'Unbreak Now!',
PRIORITIES['triage']: 'Needs Triage',
PRIORITIES['high']: 'High',
PRIORITIES['normal']: 'Normal',
PRIORITIES['low']: 'Low',
PRIORITIES['wish']: 'Wishlist',
}
# from ManiphestTaskPriority.php
PRIORITY_DESCRIPTIONS_TO_VALUES = {
desc: val for val, desc in PRIORITY_DESCRIPTIONS.iteritems()
}
# from ManiphestTaskStatus.php
STATUSES = {
0: 'Open',
1: 'Resolved',
2: 'Wontfix',
3: 'Invalid',
4: 'Duplicate',
5: 'Spite',
}
# from ManiphestTaskQuery.php
STATUS_FILTERS = {
'any': 'status-any',
'open': 'status-open',
'closed': 'status-closed',
'resolved': 'status-resolved',
'wontfix': 'status-wontfix',
'invalid': 'status-invalid',
'spite': 'status-spite',
'duplicate': 'status-duplicate',
}
# from ManiphestTaskQuery.php
ORDERS = {
'priority': 'order-priority',
'created': 'order-created',
'modified': 'order-modified',
'title': 'order-title',
}
CreateTaskResponse = phlsys_namedtuple.make_named_tuple(
'CreateTaskResponse',
required=[
'id', 'uri', 'title', 'status', 'priority',
'authorPHID', 'phid', 'description', 'objectName',
'auxiliary', 'ccPHIDs', 'ownerPHID', 'dateModified',
'dateCreated', 'projectPHIDs'
],
defaults={
'statusName': None,
},
ignored=[
'priorityColor', 'isClosed', 'dependsOnTaskPHIDs'
]
)
def create_task(
conduit,
title,
description="",
priority=None,
owner=None,
ccs=None,
projects=None):
"""Create a new Maniphest task using the supplied 'conduit'.
:conduit: supports call()
:title: string title of the new task
:description: string long description of the new task
:priority: integer priority of the new task (see PRIORITIES)
:owner: PHID of the owner or None
:ccs: PHIDs of the users to cc or None
:projects: PHIDs of the projects to add to or None
:returns: a CreateTaskResponse
"""
d = {
"title": title,
"description": description,
}
if priority is not None:
d['priority'] = priority
if owner is not None:
d['ownerPHID'] = owner
if ccs is not None:
d['ccPHIDs'] = ccs
if projects is not None:
d['projectPHIDs'] = projects
response = conduit("maniphest.createtask", d)
return CreateTaskResponse(**response)
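# Illustrative usage (added comment; 'conduit' is assumed to be any callable
# taking (method_name, params_dict), matching how it is invoked above):
#
#   task = create_task(
#       conduit, 'fix the widget', description='it crashes on start',
#       priority=PRIORITIES['high'])
#   print task.id, task.uri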
def update_task(
conduit,
id,
title=None,
description=None,
priority=None,
owner=None,
ccs=None,
projects=None,
comment=None):
"""Update a Maniphest task using the supplied 'conduit'.
:conduit: supports call()
:id: the id of the task to update
:title: new string title for the task or None
:description: new string long description for the task or None
:priority: new integer priority for the task (see PRIORITIES) or None
:owner: PHID of the owner or None
:ccs: PHIDs of the users to cc or None
:projects: PHIDs of the projects to add to or None
:comment: string comment to make on the task or None
:returns: a CreateTaskResponse
"""
d = {
"id": id,
}
if title is not None:
d['title'] = title
if description is not None:
d['description'] = description
if priority is not None:
d['priority'] = priority
if owner is not None:
d['ownerPHID'] = owner
if ccs is not None:
d['ccPHIDs'] = ccs
if projects is not None:
d['projectPHIDs'] = projects
if comment is not None:
d['comments'] = comment
response = conduit("maniphest.update", d)
return CreateTaskResponse(**response)
def query(
conduit,
ids=None,
authors=None,
owners=None,
ccs=None,
projects=None,
status=None,
limit=None,
offset=None,
order=None,
text=None):
"""Query Maniphest tasks using the supplied 'conduit'.
:conduit: supports call()
:ids: a list of specific task ids to restrict the query to
:authors: a list of author PHIDs to restrict the query to (any of)
:owners: a list of owner PHIDs to restrict the query to (any of)
:ccs: a list of cc PHIDs to restrict the query to (any of)
:projects: a list of project PHIDs to restrict the query to (any of)
:status: a particular value of STATUS_FILTERS to apply
:limit: int limit of results to return, defaults to server value if None
:offset: int offset into the list of results to return
:order: one of ORDERS to impose an ordering on results
:text: string to search tasks for
:returns: a list of CreateTaskResponse
"""
d = {
'ids': ids,
'authorPHIDs': authors,
'ownerPHIDs': owners,
'ccPHIDs': ccs,
'projectPHIDs': projects,
'status': status,
'limit': limit,
'offset': offset,
'order': order,
'fullText': text,
}
response = conduit("maniphest.query", d)
result = []
if response:
# oddly we get an empty list instead of a dictionary if no results, so
# iteritems() isn't appropriate in that case.
result = [CreateTaskResponse(**v) for k, v in response.iteritems()]
# order is broken because conduit returns a dict (unordered) instead of
# a list, we have to impose order here instead, note that it's still
# important to pass ordering to conduit in case there is a limit on the
# number of results returned
priority_desc_to_val = PRIORITY_DESCRIPTIONS_TO_VALUES
order_to_key = {
None: lambda x: -int(x.dateModified),
ORDERS['title']: lambda x: x.title,
ORDERS['created']: lambda x: -int(x.dateCreated),
ORDERS['modified']: lambda x: -int(x.dateModified),
ORDERS['priority']: lambda x: -priority_desc_to_val[x.priority],
}
result.sort(key=order_to_key[order])
return result
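# Illustrative usage (added comment; same 'conduit' assumption as in the
# create_task example above):
#
#   open_tasks = query(
#       conduit, status=STATUS_FILTERS['open'], order=ORDERS['priority'])
#   for task in open_tasks:
#       print task.id, task.title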
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
from concurrent import futures
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
from tests.unit.framework.common import test_constants
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
def __init__(self, response_pb2, payload_pb2):
self._condition = threading.Condition()
self._paused = False
self._fail = False
self._response_pb2 = response_pb2
self._payload_pb2 = payload_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = self._response_pb2.SimpleResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = self._response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = self._response_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = self._response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self._response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
class _Service(
collections.namedtuple(
'_Service', ('servicer_methods', 'server', 'stub',))):
"""A live and running service.
Attributes:
servicer_methods: The _ServicerMethods servicing RPCs.
server: The grpc.Server servicing RPCs.
stub: A stub on which to invoke RPCs.
"""
def _CreateService(service_pb2, response_pb2, payload_pb2):
"""Provides a servicer backend and a stub.
Args:
service_pb2: The service_pb2 module generated by this test.
response_pb2: The response_pb2 module generated by this test.
payload_pb2: The payload_pb2 module generated by this test.
Returns:
A _Service with which to test RPCs.
"""
servicer_methods = _ServicerMethods(response_pb2, payload_pb2)
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iter, context):
return servicer_methods.StreamingInputCall(request_iter, context)
def FullDuplexCall(self, request_iter, context):
return servicer_methods.FullDuplexCall(request_iter, context)
def HalfDuplexCall(self, request_iter, context):
return servicer_methods.HalfDuplexCall(request_iter, context)
server = grpc.server(
(), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
return _Service(servicer_methods, server, stub)
def _CreateIncompleteService(service_pb2):
"""Provides a servicer backend that fails to implement methods and its stub.
Args:
service_pb2: The service_pb2 module generated by this test.
Returns:
A _Service with which to test RPCs. The returned _Service's
servicer_methods implements none of the methods required of it.
"""
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
pass
server = grpc.server(
(), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
return _Service(None, server, stub)
def _streaming_input_request_iterator(request_pb2, payload_pb2):
for _ in range(3):
request = request_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request(request_pb2):
request = request_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator(request_pb2):
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
def setUp(self):
# Assume that the appropriate protoc and grpc_python_plugin are on the
# path.
protoc_command = 'protoc'
protoc_plugin_filename = distutils.spawn.find_executable(
'grpc_python_plugin')
if not os.path.isfile(protoc_command):
# Assume that if we haven't built protoc that it's on the system.
protoc_command = 'protoc'
# Ensure that the output directory exists.
self.outdir = tempfile.mkdtemp()
# Find all proto files
paths = []
root_dir = os.path.dirname(os.path.realpath(__file__))
proto_dir = os.path.join(root_dir, 'protos')
for walk_root, _, filenames in os.walk(proto_dir):
for filename in filenames:
if filename.endswith('.proto'):
path = os.path.join(walk_root, filename)
paths.append(path)
# Invoke protoc with the plugin.
cmd = [
protoc_command,
'--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
'-I %s' % root_dir,
'--python_out=%s' % self.outdir,
'--python-grpc_out=%s' % self.outdir
] + paths
subprocess.check_call(' '.join(cmd), shell=True, env=os.environ,
cwd=os.path.dirname(os.path.realpath(__file__)))
# Generated proto directories don't include __init__.py, but
# these are needed for Python package resolution
for walk_root, _, _ in os.walk(os.path.join(self.outdir, 'protos')):
path = os.path.join(walk_root, '__init__.py')
open(path, 'a').close()
sys.path.insert(0, self.outdir)
import protos.payload.test_payload_pb2 as payload_pb2
import protos.requests.r.test_requests_pb2 as request_pb2
import protos.responses.test_responses_pb2 as response_pb2
import protos.service.test_service_pb2 as service_pb2
self._payload_pb2 = payload_pb2
self._request_pb2 = request_pb2
self._response_pb2 = response_pb2
self._service_pb2 = service_pb2
def tearDown(self):
try:
shutil.rmtree(self.outdir)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
sys.path.remove(self.outdir)
def testImportAttributes(self):
# check that we can access the generated module and its members.
self.assertIsNotNone(
getattr(self._service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(
getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(
getattr(self._service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
def testUpDown(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
self.assertIsNotNone(service.servicer_methods)
self.assertIsNotNone(service.server)
self.assertIsNotNone(service.stub)
def testIncompleteServicer(self):
service = _CreateIncompleteService(self._service_pb2)
request = self._request_pb2.SimpleRequest(response_size=13)
with self.assertRaises(grpc.RpcError) as exception_context:
service.stub.UnaryCall(request)
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.UNIMPLEMENTED)
def testUnaryCall(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = self._request_pb2.SimpleRequest(response_size=13)
response = service.stub.UnaryCall(request)
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallFuture(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = self._request_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response = response_future.result()
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testUnaryCallFutureExpired(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = self._request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
def testUnaryCallFutureCancelled(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = self._request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response_future.cancel()
self.assertTrue(response_future.cancelled())
self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
def testUnaryCallFutureFailed(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = self._request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.fail():
response_future = service.stub.UnaryCall.future(request)
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
def testStreamingOutputCall(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = _streaming_output_request(self._request_pb2)
responses = service.stub.StreamingOutputCall(request)
expected_responses = service.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testStreamingOutputCallExpired(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = _streaming_output_request(self._request_pb2)
with service.servicer_methods.pause():
responses = service.stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
def testStreamingOutputCallCancelled(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = _streaming_output_request(self._request_pb2)
responses = service.stub.StreamingOutputCall(request)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
def testStreamingOutputCallFailed(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request = _streaming_output_request(self._request_pb2)
with service.servicer_methods.fail():
responses = service.stub.StreamingOutputCall(request)
self.assertIsNotNone(responses)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
def testStreamingInputCall(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
response = service.stub.StreamingInputCall(
_streaming_input_request_iterator(
self._request_pb2, self._payload_pb2))
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFuture(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(
self._request_pb2, self._payload_pb2))
response = response_future.result()
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(self._request_pb2, self._payload_pb2),
'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFutureExpired(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(
self._request_pb2, self._payload_pb2),
timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIs(
response_future.exception().code(), grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
def testStreamingInputCallFutureCancelled(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(
self._request_pb2, self._payload_pb2))
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
def testStreamingInputCallFutureFailed(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with service.servicer_methods.fail():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(
self._request_pb2, self._payload_pb2))
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
def testFullDuplexCall(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
responses = service.stub.FullDuplexCall(
_full_duplex_request_iterator(self._request_pb2))
expected_responses = service.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(self._request_pb2),
'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
request_iterator = _full_duplex_request_iterator(self._request_pb2)
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with service.servicer_methods.pause():
responses = service.stub.FullDuplexCall(
request_iterator, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
def testFullDuplexCallCancelled(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
request_iterator = _full_duplex_request_iterator(self._request_pb2)
responses = service.stub.FullDuplexCall(request_iterator)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.CANCELLED)
def testFullDuplexCallFailed(self):
request_iterator = _full_duplex_request_iterator(self._request_pb2)
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with service.servicer_methods.fail():
responses = service.stub.FullDuplexCall(request_iterator)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN)
def testHalfDuplexCall(self):
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
def half_duplex_request_iterator():
request = self._request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = self._request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
expected_responses = service.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testHalfDuplexCallWedged(self):
condition = threading.Condition()
wait_cell = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
with condition:
wait_cell[0] = True
yield
with condition:
wait_cell[0] = False
condition.notify_all()
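    # A hedged sketch of how the mutable-cell workaround above could read with
    # Python 3's `nonlocal` (not usable here since this module evidently still
    # has to run under Python 2):
    #
    #   waiting = False
    #   @contextlib.contextmanager
    #   def wait():
    #     nonlocal waiting
    #     with condition:
    #       waiting = True
    #     yield
    #     with condition:
    #       waiting = False
    #       condition.notify_all()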
def half_duplex_request_iterator():
request = self._request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
service = _CreateService(
self._service_pb2, self._response_pb2, self._payload_pb2)
with wait():
responses = service.stub.HalfDuplexCall(
half_duplex_request_iterator(), timeout=test_constants.SHORT_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(
exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
from common_fixtures import * # NOQA
from cattle import ApiError
@pytest.fixture(scope='module')
def config_id(client):
default_lb_config = client. \
create_loadBalancerConfig(name=random_str())
default_lb_config = client.wait_success(default_lb_config)
return default_lb_config.id
# test (C)
def test_lb_create_wo_config(client):
with pytest.raises(ApiError) as e:
client.create_loadBalancer(name=random_str())
assert e.value.error.status == 422
assert e.value.error.code == 'MissingRequired'
assert e.value.error.fieldName == 'loadBalancerConfigId'
# test (C)
def create_valid_lb(client, config_id):
test_lb = client. \
create_loadBalancer(name=random_str(),
loadBalancerConfigId=config_id)
test_lb = client.wait_success(test_lb)
return test_lb
def test_lb_create_w_config(client, config_id):
lb = create_valid_lb(client, config_id)
assert lb.state == 'active'
assert lb.loadBalancerConfigId == config_id
# test (D)
def test_lb_remove(client, config_id):
# create lb
lb = create_valid_lb(client, config_id)
# remove newly created lb
lb = client.wait_success(client.delete(lb))
assert lb.state == 'removed'
# test (U)
def test_lb_update(client, config_id):
# create lb
lb = create_valid_lb(client, config_id)
# update the lb
lb = client.update(lb, name='newName')
assert lb.name == 'newName'
def test_lb_add_target_instance(super_client, client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# add target to a load balancer
lb = lb.addtarget(instanceId=container.id)
validate_add_target(container, lb, super_client)
def test_lb_remove_target_instance(super_client, client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
lb = lb.addtarget(instanceId=container.id)
validate_add_target(container, lb, super_client)
# remove the target and verify that the target no longer exists
lb = lb.removetarget(instanceId=container.id)
validate_remove_target(container, lb, super_client)
def validate_add_target_ip(ip_address, lb, super_client):
target_maps = super_client. \
list_loadBalancerTarget(loadBalancerId=lb.id,
ipAddress=ip_address)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
assert target_map.ipAddress == ip_address
def test_lb_add_target_ip_address(client, context, config_id, super_client):
lb = create_valid_lb(client, config_id)
ip_address = "10.1.1.1"
lb = lb.addtarget(ipAddress=ip_address)
lb = super_client.wait_success(lb)
validate_add_target_ip(ip_address, lb, super_client)
def validate_remove_target_ip(ip_address, lb, super_client):
target_maps = super_client. \
list_loadBalancerTarget(loadBalancerId=lb.id,
ipAddress=ip_address)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_lb_remove_target_ip_address(client, context, config_id):
lb = create_valid_lb(client, config_id)
# add target to a load balancer and verify that it got created
ip_address = "10.1.1.1"
lb = lb.addtarget(ipAddress=ip_address)
validate_add_target_ip(ip_address, lb, client)
# remove the target and verify that the target no longer exists
lb = lb.removetarget(ipAddress="10.1.1.1")
validate_remove_target_ip(ip_address, lb, client)
def create_lb_and_container(client, context, config_id):
# create load balancer
lb = create_valid_lb(client, config_id)
# create a container, no need to start it
container = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
container = client.wait_success(container)
return container, lb
def test_lb_remove_w_target(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# add target to a load balancer
lb = lb.addtarget(instanceId=container.id)
lb = client.wait_success(lb)
# remove the load balancer
lb = client.wait_success(client.delete(lb))
assert lb.state == 'removed'
validate_remove_target(container, lb, client)
def test_lb_remove_w_host(client, context, config_id):
host = context.host
# create lb, assign the hosts to it
lb = create_valid_lb(client, config_id)
lb = lb.addhost(hostId=host.id)
validate_add_host(host, lb, client)
# remove the load balancer
lb = client.wait_success(client.delete(lb))
assert lb.state == 'removed'
validate_remove_host(host, lb, client)
def validate_add_target(container1, lb, super_client):
target_maps = super_client. \
list_loadBalancerTarget(loadBalancerId=lb.id,
instanceId=container1.id)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
def validate_remove_target(container2, lb, super_client):
target_maps = super_client. \
list_loadBalancerTarget(loadBalancerId=lb.id,
instanceId=container2.id)
assert len(target_maps) == 1
target_map = target_maps[0]
wait_for_condition(
super_client, target_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_set_target_instance(client, context, config_id):
container1, lb = create_lb_and_container(client, context, config_id)
container2 = client. \
create_container(imageUuid=context.image_uuid,
startOnCreate=False)
container2 = client.wait_success(container2)
# set 2 targets
lb = lb.settargets(instanceIds=[container1.id, container2.id])
lb = client.wait_success(lb)
validate_add_target(container1, lb, client)
validate_add_target(container2, lb, client)
# set 1 target
lb = lb.settargets(instanceIds=[container1.id])
validate_add_target(container1, lb, client)
validate_remove_target(container2, lb, client)
# set 0 targets
lb = lb.settargets(instanceIds=[])
validate_remove_target(container1, lb, client)
def test_lb_set_target_ip_address(client, context, config_id):
lb = create_valid_lb(client, config_id)
# set 2 targets
lb = lb.settargets(ipAddresses=["10.1.1.1", "10.1.1.2"])
validate_add_target_ip("10.1.1.1", lb, client)
validate_add_target_ip("10.1.1.2", lb, client)
# set 1 target
lb = lb.settargets(ipAddresses=["10.1.1.1"])
validate_add_target_ip("10.1.1.1", lb, client)
validate_remove_target_ip("10.1.1.2", lb, client)
# set 0 targets
lb = lb.settargets(ipAddresses=[])
validate_remove_target_ip("10.1.1.1", lb, client)
def test_set_target_instance_and_ip(client, context, config_id):
container1, lb = create_lb_and_container(client, context, config_id)
# set 2 targets - one ip and one instanceId
lb = lb.settargets(instanceIds=[container1.id],
ipAddresses="10.1.1.1")
validate_add_target(container1, lb, client)
validate_add_target_ip("10.1.1.1", lb, client)
def test_lb_add_target_instance_twice(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# add target to a load balancer
lb = lb.addtarget(instanceId=container.id)
validate_add_target(container, lb, client)
with pytest.raises(ApiError) as e:
lb.addtarget(instanceId=container.id)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'instanceId'
def test_lb_remove_non_existing_target_instance(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# remove non-existing target
with pytest.raises(ApiError) as e:
lb.removetarget(instanceId=container.id)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
assert e.value.error.fieldName == 'instanceId'
def test_lb_add_target_ip_address_and_instance(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
with pytest.raises(ApiError) as e:
lb.addtarget(ipAddress="10.1.1.1",
instanceId=container.id)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
assert e.value.error.fieldName == 'ipAddress'
def test_lb_add_target_w_no_option(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
with pytest.raises(ApiError) as e:
lb.addtarget()
assert e.value.error.status == 422
assert e.value.error.code == 'MissingRequired'
assert e.value.error.fieldName == 'instanceId'
def test_lb_add_target_ip_twice(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# add target to a load balancer
lb = lb.addtarget(ipAddress="10.1.1.1")
validate_add_target_ip("10.1.1.1", lb, client)
with pytest.raises(ApiError) as e:
lb.addtarget(ipAddress="10.1.1.1")
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'ipAddress'
def test_lb_remove_non_existing_target_ip(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# remove non-existing target
with pytest.raises(ApiError) as e:
lb.removetarget(ipAddress="10.1.1.1")
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
assert e.value.error.fieldName == 'ipAddress'
def test_add_removed_target_again(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# add target to a load balancer
lb = lb.addtarget(instanceId=container.id)
validate_add_target(container, lb, client)
# remove the target
lb = lb.removetarget(instanceId=container.id)
validate_remove_target(container, lb, client)
# add the target - should be allowed
lb.addtarget(instanceId=container.id)
def test_destroy_container(client, context, config_id):
container, lb = create_lb_and_container(client, context, config_id)
# add target to a load balancer
lb = lb.addtarget(instanceId=container.id)
validate_add_target(container, lb, client)
# destroy the instance
# stop the lb instance
container = client.wait_success(container)
if container.state == 'running':
container = client.wait_success(container.stop())
assert container.state == 'stopped'
# remove the lb instance
container = client.wait_success(container.remove())
assert container.state == 'removed'
validate_remove_target(container, lb, client)
def _resource_is_active(resource):
return resource.state == 'active'
def _resource_is_removed(resource):
return resource.state == 'removed'
def validate_add_host(host, lb, super_client):
host_maps = super_client. \
list_loadBalancerHostMap(loadBalancerId=lb.id,
hostId=host.id)
assert len(host_maps) == 1
host_map = host_maps[0]
wait_for_condition(
super_client, host_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
assert host_map.hostId == host.id
def validate_remove_host(host, lb, super_client):
host_maps = super_client. \
list_loadBalancerHostMap(loadBalancerId=lb.id,
hostId=host.id)
assert len(host_maps) == 1
host_map = host_maps[0]
wait_for_condition(
super_client, host_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
return host_map
|
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""PrograReader and ProgramEditor need to be able to read UserRole resources.
Revision ID: 3e08ed6b47b8
Revises: 2785a204a673
Create Date: 2013-12-18 22:58:18.613406
"""
# revision identifiers, used by Alembic.
revision = '3e08ed6b47b8'
down_revision = '2785a204a673'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
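# A minimal sketch (not part of this migration) of how the two helpers above
# could be combined to append a single permission via read-modify-write; the
# upgrade()/downgrade() below instead rewrite each role's permission dict
# wholesale:
#
#   permissions = get_role_permissions('ProgramReader')
#   if 'UserRole' not in permissions['read']:
#       permissions['read'].append('UserRole')
#   update_role_permissions('ProgramReader', permissions)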
def upgrade():
update_role_permissions('ProgramReader', {
"read": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Program",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder",
"UserRole",
],
"create": [],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [],
"delete": []
})
update_role_permissions('ProgramEditor', {
"read": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Program",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder",
"UserRole",
],
"create": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder"
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Program",
"ProgramControl",
"ProgramDirective",
"Relationship"
],
"delete": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder"
]
})
update_role_permissions('ProgramAuditOwner', {
"read": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"UserRole",
"Audit",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"create": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"UserRole",
"Audit",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting",
"Response",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"delete": [
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
]
})
update_role_permissions('ProgramAuditEditor', {
"read": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"create": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting",
"Response",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"delete": [
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
]
})
def downgrade():
update_role_permissions('ProgramReader', {
"read": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Program",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder"
],
"create": [],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [],
"delete": []
})
update_role_permissions('ProgramEditor', {
"read": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Program",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder"
],
"create": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder"
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Program",
"ProgramControl",
"ProgramDirective",
"Relationship"
],
"delete": [
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"ProgramControl",
"ProgramDirective",
"Relationship",
"ObjectFolder"
]
})
update_role_permissions('ProgramAuditOwner', {
"read": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"UserRole",
"Audit",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"create": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"UserRole",
"Audit",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"delete": [
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
]
})
update_role_permissions('ProgramAuditEditor', {
"read": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"create": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"ObjectFolder",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Audit",
"Meeting",
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
],
"delete": [
"ObjectControl",
"ObjectDocument",
"ObjectObjective",
"ObjectPerson",
"ObjectSection",
"Relationship",
"Document",
"Meeting"
]
})
|
|
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import kinesis_backends
from werkzeug.exceptions import BadRequest
class KinesisResponse(BaseResponse):
@property
def parameters(self):
return json.loads(self.body.decode("utf-8"))
@property
def kinesis_backend(self):
return kinesis_backends[self.region]
@property
def is_firehose(self):
host = self.headers.get('host') or self.headers['Host']
return host.startswith('firehose')
def create_stream(self):
stream_name = self.parameters.get('StreamName')
shard_count = self.parameters.get('ShardCount')
self.kinesis_backend.create_stream(stream_name, shard_count, self.region)
return ""
def describe_stream(self):
stream_name = self.parameters.get('StreamName')
stream = self.kinesis_backend.describe_stream(stream_name)
return json.dumps(stream.to_json())
def list_streams(self):
streams = self.kinesis_backend.list_streams()
return json.dumps({
"HasMoreStreams": False,
"StreamNames": [stream.stream_name for stream in streams],
})
def delete_stream(self):
stream_name = self.parameters.get("StreamName")
self.kinesis_backend.delete_stream(stream_name)
return ""
def get_shard_iterator(self):
stream_name = self.parameters.get("StreamName")
shard_id = self.parameters.get("ShardId")
shard_iterator_type = self.parameters.get("ShardIteratorType")
starting_sequence_number = self.parameters.get("StartingSequenceNumber")
shard_iterator = self.kinesis_backend.get_shard_iterator(
stream_name, shard_id, shard_iterator_type, starting_sequence_number,
)
return json.dumps({
"ShardIterator": shard_iterator
})
def get_records(self):
shard_iterator = self.parameters.get("ShardIterator")
limit = self.parameters.get("Limit")
next_shard_iterator, records = self.kinesis_backend.get_records(shard_iterator, limit)
return json.dumps({
"NextShardIterator": next_shard_iterator,
"Records": [record.to_json() for record in records]
})
def put_record(self):
if self.is_firehose:
return self.firehose_put_record()
stream_name = self.parameters.get("StreamName")
partition_key = self.parameters.get("PartitionKey")
explicit_hash_key = self.parameters.get("ExplicitHashKey")
sequence_number_for_ordering = self.parameters.get("SequenceNumberForOrdering")
data = self.parameters.get("Data")
sequence_number, shard_id = self.kinesis_backend.put_record(
stream_name, partition_key, explicit_hash_key, sequence_number_for_ordering, data
)
return json.dumps({
"SequenceNumber": sequence_number,
"ShardId": shard_id,
})
def put_records(self):
if self.is_firehose:
return self.put_record_batch()
stream_name = self.parameters.get("StreamName")
records = self.parameters.get("Records")
response = self.kinesis_backend.put_records(
stream_name, records
)
return json.dumps(response)
def split_shard(self):
stream_name = self.parameters.get("StreamName")
shard_to_split = self.parameters.get("ShardToSplit")
new_starting_hash_key = self.parameters.get("NewStartingHashKey")
response = self.kinesis_backend.split_shard(
stream_name, shard_to_split, new_starting_hash_key
)
return ""
def merge_shards(self):
stream_name = self.parameters.get("StreamName")
shard_to_merge = self.parameters.get("ShardToMerge")
adjacent_shard_to_merge = self.parameters.get("AdjacentShardToMerge")
response = self.kinesis_backend.merge_shards(
stream_name, shard_to_merge, adjacent_shard_to_merge
)
return ""
''' Firehose '''
def create_delivery_stream(self):
stream_name = self.parameters['DeliveryStreamName']
redshift_config = self.parameters.get('RedshiftDestinationConfiguration')
if redshift_config:
redshift_s3_config = redshift_config['S3Configuration']
stream_kwargs = {
'redshift_username': redshift_config['Username'],
'redshift_password': redshift_config['Password'],
'redshift_jdbc_url': redshift_config['ClusterJDBCURL'],
'redshift_role_arn': redshift_config['RoleARN'],
'redshift_copy_command': redshift_config['CopyCommand'],
'redshift_s3_role_arn': redshift_s3_config['RoleARN'],
'redshift_s3_bucket_arn': redshift_s3_config['BucketARN'],
'redshift_s3_prefix': redshift_s3_config['Prefix'],
'redshift_s3_compression_format': redshift_s3_config.get('CompressionFormat'),
'redshift_s3_buffering_hings': redshift_s3_config['BufferingHints'],
}
stream = self.kinesis_backend.create_delivery_stream(stream_name, **stream_kwargs)
return json.dumps({
'DeliveryStreamARN': stream.arn
})
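    # Illustrative CreateDeliveryStream payload shape for a Redshift
    # destination, reconstructed from the keys read above (all values are
    # placeholders, not an official AWS sample):
    #
    #   {"DeliveryStreamName": "my-stream",
    #    "RedshiftDestinationConfiguration": {
    #        "Username": "...", "Password": "...", "ClusterJDBCURL": "...",
    #        "RoleARN": "...", "CopyCommand": {...},
    #        "S3Configuration": {"RoleARN": "...", "BucketARN": "...",
    #                            "Prefix": "...", "CompressionFormat": "...",
    #                            "BufferingHints": {...}}}}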
def describe_delivery_stream(self):
stream_name = self.parameters["DeliveryStreamName"]
stream = self.kinesis_backend.get_delivery_stream(stream_name)
return json.dumps(stream.to_dict())
def list_delivery_streams(self):
streams = self.kinesis_backend.list_delivery_streams()
return json.dumps({
"DeliveryStreamNames": [
stream.name for stream in streams
],
"HasMoreDeliveryStreams": False
})
def delete_delivery_stream(self):
stream_name = self.parameters['DeliveryStreamName']
self.kinesis_backend.delete_delivery_stream(stream_name)
return json.dumps({})
def firehose_put_record(self):
stream_name = self.parameters['DeliveryStreamName']
record_data = self.parameters['Record']['Data']
record = self.kinesis_backend.put_firehose_record(stream_name, record_data)
return json.dumps({
"RecordId": record.record_id,
})
def put_record_batch(self):
stream_name = self.parameters['DeliveryStreamName']
records = self.parameters['Records']
request_responses = []
for record in records:
record_response = self.kinesis_backend.put_firehose_record(stream_name, record['Data'])
request_responses.append({
"RecordId": record_response.record_id
})
return json.dumps({
"FailedPutCount": 0,
"RequestResponses": request_responses,
})
def add_tags_to_stream(self):
stream_name = self.parameters.get('StreamName')
tags = self.parameters.get('Tags')
self.kinesis_backend.add_tags_to_stream(stream_name, tags)
return json.dumps({})
def list_tags_for_stream(self):
stream_name = self.parameters.get('StreamName')
exclusive_start_tag_key = self.parameters.get('ExclusiveStartTagKey')
limit = self.parameters.get('Limit')
response = self.kinesis_backend.list_tags_for_stream(stream_name, exclusive_start_tag_key, limit)
return json.dumps(response)
def remove_tags_from_stream(self):
stream_name = self.parameters.get('StreamName')
tag_keys = self.parameters.get('TagKeys')
self.kinesis_backend.remove_tags_from_stream(stream_name, tag_keys)
return json.dumps({})
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
import shutil
import pandas as pd
import pytest
from ludwig.constants import NAME
from ludwig.experiment import experiment_cli
from tests.integration_tests.utils import binary_feature, sequence_feature, \
set_feature, text_feature, vector_feature
from tests.integration_tests.utils import category_feature
from tests.integration_tests.utils import generate_data
from tests.integration_tests.utils import numerical_feature
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.getLogger("ludwig").setLevel(logging.INFO)
def run_experiment(input_features, output_features, **kwargs):
"""
Helper method to avoid code repetition in running an experiment. Deletes
the data saved to disk after running the experiment
:param input_features: list of input feature dictionaries
:param output_features: list of output feature dictionaries
**kwargs you may also pass extra parameters to the experiment as keyword
arguments
:return: None
"""
config = None
if input_features is not None and output_features is not None:
        # This check is necessary so that the caller can pass config_file
        # instead of config.
config = {
'input_features': input_features,
'output_features': output_features,
'combiner': {
'type': 'concat',
'fc_size': 64,
'num_fc_layers': 5
},
'training': {'epochs': 2}
}
args = {
'config': config,
'skip_save_processed_input': True,
'skip_save_progress': True,
'skip_save_unprocessed_output': True,
'skip_save_model': True,
'skip_save_log': True
}
args.update(kwargs)
exp_dir_name = experiment_cli(**args)
shutil.rmtree(exp_dir_name, ignore_errors=True)
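# A minimal usage sketch of the helper above (feature constructors and
# generate_data come from tests.integration_tests.utils, imported at the top
# of this module; the CSV filename is illustrative):
#
#   input_features = [numerical_feature()]
#   output_features = [binary_feature()]
#   rel_path = generate_data(input_features, output_features, 'dataset.csv')
#   run_experiment(input_features, output_features, dataset=rel_path)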
@pytest.mark.parametrize(
'input_test_feature, output_test_feature, output_loss_parameter',
[
# numerical features
(numerical_feature(), numerical_feature(), None),
(
numerical_feature(normalization='minmax'),
numerical_feature(),
{'loss': {'type': 'mean_squared_error'}}
),
(
numerical_feature(normalization='zscore'),
numerical_feature(),
{'loss': {'type': 'mean_absolute_error'}}
),
# binary feature
(binary_feature(), binary_feature(), None),
# Categorical feature
(category_feature(), category_feature(), None),
(
category_feature(),
category_feature(),
{'loss': {'type': 'softmax_cross_entropy'}}
),
(
category_feature(),
category_feature(),
{'loss': {
'type': 'sampled_softmax_cross_entropy',
'sampler': 'fixed_unigram',
'negative_samples': 10
}
}
),
(
category_feature(),
category_feature(),
{'loss': {
'type': 'sampled_softmax_cross_entropy',
'sampler': 'uniform',
'negative_samples': 10
}
}
),
(
category_feature(),
category_feature(),
{'loss': {
'type': 'sampled_softmax_cross_entropy',
'sampler': 'log_uniform',
'negative_samples': 10
}
}
),
(
category_feature(),
category_feature(),
{'loss': {
'type': 'sampled_softmax_cross_entropy',
'sampler': 'learned_unigram',
'negative_samples': 10
}
}
)
]
)
def test_feature(input_test_feature, output_test_feature,
output_loss_parameter, csv_filename):
input_features = [
input_test_feature
]
of_test_feature = output_test_feature
if output_loss_parameter is not None:
of_test_feature.update(output_loss_parameter)
output_features = [of_test_feature]
# Generate test data
rel_path = generate_data(input_features, output_features, csv_filename,
1001)
run_experiment(input_features, output_features, dataset=rel_path)
@pytest.mark.parametrize(
'input_test_feature, output_test_feature',
[
([category_feature()],
[binary_feature(), binary_feature()]),
([category_feature()],
[category_feature(vocab_size=5), category_feature(vocab_size=7)]),
([category_feature()],
[numerical_feature(), numerical_feature()]),
([category_feature()],
[sequence_feature(vocab_size=5), sequence_feature(vocab_size=7)]),
([category_feature()],
[set_feature(vocab_size=5), set_feature(vocab_size=7)]),
([category_feature()],
[text_feature(vocab_size=5), text_feature(vocab_size=7)]),
([category_feature()],
[vector_feature(), vector_feature()]),
]
)
def test_feature_multiple_outputs(input_test_feature, output_test_feature,
csv_filename):
# Generate test data
rel_path = generate_data(input_test_feature, output_test_feature,
csv_filename, 1001)
run_experiment(input_test_feature, output_test_feature, dataset=rel_path)
def test_category_int_dtype(tmpdir):
feature = category_feature()
input_features = [feature]
output_features = [binary_feature()]
csv_fname = generate_data(input_features, output_features,
os.path.join(tmpdir, 'dataset.csv'))
df = pd.read_csv(csv_fname)
distinct_values = df[feature[NAME]].drop_duplicates().values
value_map = {v: idx for idx, v in enumerate(distinct_values)}
df[feature[NAME]] = df[feature[NAME]].map(
lambda x: value_map[x]
)
run_experiment(input_features, output_features, dataset=df)
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
"""Tests TF linear interpolation mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
"""Tests TF 1d linear interpolation not mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
"""Helper performing 1d linear interpolation using SciPy."""
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
"""Helper performing 1d linear interpolation using Tensorflow."""
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
def test_tf_linear_interp1d_against_scipy_map(self):
"""Tests parity of TF linear interpolation with SciPy for simple mapping."""
length = 10
np_x = np.linspace(0, 1, length)
# Mapping all numbers to 0.5
np_y_map = np.repeat(0.5, length)
# Scipy and TF interpolations
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
"""Tests parity of TF linear interpolation with SciPy."""
length = 10
np_x = np.linspace(0, 1, length)
# Requires interpolation over 0.5 to 1 domain
np_y_interp = np.linspace(0.5, 1, length)
# Scipy interpolation for comparison
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
"""Adds a function approximation to calibration proto for a class id."""
# Per-class calibration.
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
# Class-agnostic calibration.
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
def test_class_agnostic_function_approximation(self):
"""Tests that calibration produces correct class-agnostic values."""
# Generate fake calibration proto. For this interpolation, any input on
# [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
# 0.25 subtracted from it.
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
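    # Worked example of the piecewise-linear map these arrays define: an input
    # of 0.4 interpolates to 0.2 (0.4 / 2), while an input of 0.9 interpolates
    # to 0.65 (0.9 - 0.25), matching the description above.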
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
# Everything should map to 0.5 if classes are ignored.
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
"""Tests that calibration produces correct multiclass values."""
# Background class (0-index) maps all predictions to 0.5.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
# Class id 1 will interpolate using these values.
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
"""Tests that calibration produces correct temperature scaling values."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
def test_temperature_scaling_incorrect_value_error(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
"""Tests that graph fails when parameters not present for all classes."""
# Only adding calibration parameters for class id = 0, even though class id
# 1 is present in the data.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
if __name__ == '__main__':
tf.test.main()
|
|
"""
Django settings for Library project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import datetime
# from Library.local_settings import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mnpw!$&cxxe@lbqa984&_zf-s#_!ms-er@i$i4xh4=e-(2(2$w'
PRODUCTION = (os.getenv('PRODUCTION', False) == 'True')
DOCKER = (os.getenv('DOCKER', False) == 'True')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'django_nose',
# 'haystack',
'rest_framework',
'rest_framework_swagger',
'api',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'Library.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if not DOCKER:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'library',
'HOST': 'db',
'PORT': '3306',
'USER': 'dbadmin',
'PASSWORD': '123456',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = '/admin/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-Hant'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
FILE_CHARSET = 'utf-8'
DEFAULT_CHARSET = 'utf-8'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ]
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
# REST FRAMEWORK SETTING
AUTH_USER_MODEL = 'api.CustomUser'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DATETIME_FORMAT': '%Y-%m-%d %H:%M',
    'DATETIME_INPUT_FORMATS': ['%Y-%m-%d %H:%M', ],
'DATE_FORMAT': '%Y-%m-%d',
'DATE_INPUT_FORMATS': ['%Y-%m-%d', ],
'TIME_FORMAT': '%H:%M',
    'TIME_INPUT_FORMATS': ['%H:%M', ],
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# JWT Setting
JWT_AUTH = {
'JWT_ENCODE_HANDLER':
'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER':
'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER':
'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_RESPONSE_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_response_payload_handler',
'JWT_SECRET_KEY': SECRET_KEY,
'JWT_PUBLIC_KEY': None,
'JWT_PRIVATE_KEY': None,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1),
'JWT_AUDIENCE': None,
'JWT_ISSUER': None,
'JWT_ALLOW_REFRESH': True,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=30),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
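# With JWT_AUTH_HEADER_PREFIX set to 'JWT' above, clients authenticate by
# sending a header of the form (token value illustrative):
#   Authorization: JWT <token>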
## SWAGGER_SETTING
SWAGGER_SETTINGS = {
'USE_SESSION_AUTH': True,
'SECURITY_DEFINITIONS': {
'basic': {
'type': 'basic'
},
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
},
},
'DOC_EXPANSION': 'list',
"JSON_EDITOR": True,
"APIS_SORTER": "alpha",
"SHOW_REQUEST_HEADERS": True,
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--nocapture',
'--nologcapture',
]
# if PRODUCTION:
# HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
# 'URL': 'http://127.0.0.1:9200/',
# 'INDEX_NAME': 'haystack',
# },
# }
# EMAIL_USE_TLS = True
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_HOST = 'smtp.gmail.com'
#
# EMAIL_PORT = 587
# DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
|
|
import os
import time
import numpy as np
from scipy.interpolate import interp1d
from math import log, pi, sqrt, e, isnan
from matplotlib import pylab as plt
from matplotlib import mlab as mlab
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float, Bool, Str
from openmdao.lib.drivers.api import CaseIteratorDriver, BroydenSolver, NewtonSolver
from openmdao.lib.casehandlers.api import BSONCaseRecorder, CaseDataset
#Cengel Y., Turner R., and Cimbala J., Fundamentals of
# Thermal-Fluid Sciences, McGraw-Hill Companies, 2008.
#Table A-22 pg 1020
temp_lookup = np.array([0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, \
60, 70, 80, 90, 100, 120, 140, 160, 180, \
200, 250, 300])
k_lookup = np.array([0.02364, .02401, .02439, .02476, .02514, \
.02551, .02588, .02625, .02662, .02699, .02735, \
.02808, .02881, .02953, .03024, .03095, .03235, \
.03374, .03511, .03646, .03779, .04104, .04418])
k_interp = interp1d(temp_lookup, k_lookup, fill_value=.02, bounds_error=False)
nu_lookup = np.array([1.338, 1.382, 1.426, 1.470, 1.516, 1.562, 1.608,\
1.655, 1.702, 1.75, 1.798, 1.896, 1.995, 2.097, \
2.201, 2.306, 2.522, 2.745, 2.975, 3.212,\
3.455, 4.091, 4.765])*(10**(-5))
nu_interp = interp1d(temp_lookup, nu_lookup, fill_value=1.3e-5, bounds_error=False)
alpha_lookup = np.array([1.818, 1.880, 1.944, 2.009, 2.074, 2.141, 2.208,\
2.277, 2.346, 2.416, 2.487, 2.632, 2.78, 2.931,\
3.086, 3.243, 3.565, 3.898, 4.241, 4.592, \
4.954, 5.89, 6.871])*(10**(-5))
alpha_interp = interp1d(temp_lookup, alpha_lookup, fill_value=1.8e-5, bounds_error=False)
pr_lookup = np.array([.7362, .7350, .7336, .7323, .7309, .7296, .7282, \
.7268, .7255, .7241, .7228, .7202, .7177, .7154, \
.7132, .7111, .7073, .7041, .7014, .6992, \
.6974, .6946, .6935])
pr_interp = interp1d(temp_lookup, pr_lookup, fill_value=.74, bounds_error=False)
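# Quick sanity check of the interpolators above (interp1d reproduces the table
# exactly at a tabulated temperature): k_interp(30.) gives 0.02588, the air
# conductivity at 30 C in k_lookup, while out-of-range queries such as
# k_interp(-40.) return the fill_value (0.02) because bounds_error=False.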
class HyperloopMonteCarlo(Assembly):
#output
timestamp = Str(iotype='out',desc='timestamp for output filename')
def configure(self):
driver = self.add('driver', CaseIteratorDriver())
self.add('hyperloop', MiniHyperloop())
driver.add_parameter('hyperloop.temp_outside_ambient')
driver.add_parameter('hyperloop.solar_insolation')
driver.add_parameter('hyperloop.surface_reflectance')
driver.add_parameter('hyperloop.num_pods')
driver.add_parameter('hyperloop.emissivity_tube')
driver.add_parameter('hyperloop.Nu_multiplier')
driver.add_parameter('hyperloop.compressor_adiabatic_eff')
driver.add_response('hyperloop.temp_boundary')
#driver.add_response('hyperloop.radius_tube_outer')
N_SAMPLES = 15000
driver.case_inputs.hyperloop.temp_outside_ambient = np.random.normal(305,4.4,N_SAMPLES)
driver.case_inputs.hyperloop.solar_insolation = np.random.triangular(200,1000,1000,N_SAMPLES); #left, mode, right, samples
driver.case_inputs.hyperloop.c_solar = np.random.triangular(0.5,0.7,1,N_SAMPLES);
driver.case_inputs.hyperloop.surface_reflectance = np.random.triangular(0.4,0.5,0.9,N_SAMPLES);
driver.case_inputs.hyperloop.num_pods = np.random.normal(34,2,N_SAMPLES);
driver.case_inputs.hyperloop.emissivity_tube = np.random.triangular(0.4,0.5,0.9,N_SAMPLES);
driver.case_inputs.hyperloop.Nu_multiplier = np.random.triangular(0.9,1,3,N_SAMPLES);
driver.case_inputs.hyperloop.compressor_adiabatic_eff = np.random.triangular(0.6,0.69,0.8,N_SAMPLES);
self.timestamp = time.strftime("%Y%m%d%H%M%S")
self.recorders = [BSONCaseRecorder('../output/therm_mc_%s.bson'%self.timestamp)]
class MiniHyperloop(Assembly):
    """ Abridged Hyperloop Model """
    temp_boundary = Float(0, iotype="out", desc="final equilibrium tube wall temperature")
def configure(self):
#Add Components
self.add('tubeTemp', TubeWallTemp2())
driver = self.add('driver',NewtonSolver())
driver.add_parameter('tubeTemp.temp_boundary',low=0.,high=500.)
driver.add_constraint('tubeTemp.ss_temp_residual=0')
driver.workflow.add(['tubeTemp'])
#Boundary Input Connections
#Hyperloop -> Compressor
self.connect('tubeTemp.temp_boundary','temp_boundary')
self.create_passthrough('tubeTemp.compressor_adiabatic_eff')
#self.create_passthrough('tubeTemp.temp_boundary')
#Hyperloop -> TubeWallTemp
self.create_passthrough('tubeTemp.c_solar')
self.create_passthrough('tubeTemp.temp_outside_ambient')
self.create_passthrough('tubeTemp.solar_insolation')
self.create_passthrough('tubeTemp.surface_reflectance')
self.create_passthrough('tubeTemp.num_pods')
self.create_passthrough('tubeTemp.emissivity_tube')
self.create_passthrough('tubeTemp.Nu_multiplier')
class TubeWallTemp2(Component):
""" [Tweaked from original to include simple comp calcs] Calculates Q released/absorbed by the hyperloop tube """
#--New Comp Inputs--
pod_MN = Float(0.91, iotype='in', desc='Capsule Mach number')
Wdot = Float(0.49, units='kg/s', iotype='in', desc='Airflow')
tube_P = Float(99., units='Pa', iotype='in', desc='Tube ambient pressure')
compPR = Float(12., iotype='in',desc='Compressor Pressure ratio')
compressor_adiabatic_eff = Float(.8, iotype="in", desc="adiabatic efficiency for the compressors")
inlet_Tt = Float(367, units='K', iotype='out', desc='Inlet total temperature')
inlet_Pt = Float(169., units='Pa', iotype='out', desc='Compressor inlet total pressure')
exit_Tt = Float(948, units='K', iotype='out', desc='Exit total temperature')
exit_Pt = Float(2099., units='Pa', iotype='out', desc='Compressor exit total pressure')
cp_air = Float(1148.9, units='J/(kg*K)', iotype='out', desc='Specific heat of air, compressor exit')
pod_heat = Float(356149., units='W', iotype='out', desc='Heating due to a single capsule')
#--Inputs--
#Hyperloop Parameters/Design Variables
    radius_outer_tube = Float(2, units = 'm', iotype='in', desc='tube outer radius') #7.3ft 1.115
length_tube = Float(482803, units = 'm', iotype='in', desc='Length of entire Hyperloop') #300 miles, 1584000ft
num_pods = Float(34, iotype='in', desc='Number of Pods in the Tube at a given time') #
temp_boundary = Float(322.0, units = 'K', iotype='in', desc='Average Temperature of the tube wall') #
temp_outside_ambient = Float(305.6, units = 'K', iotype='in', desc='Average Temperature of the outside air') #
#nozzle_air = FlowStationVar(iotype="in", desc="air exiting the pod nozzle", copy=None)
#bearing_air = FlowStationVar(iotype="in", desc="air exiting the air bearings", copy=None)
#constants
solar_insolation = Float(1000., iotype="in", units = 'W/m**2', desc='solar irradiation at sea level on a clear day') #
c_solar = Float(1, iotype='in', desc='irradiance adjustment factor')
nn_incidence_factor = Float(0.7, iotype="in", desc='Non-normal incidence factor') #
surface_reflectance = Float(0.5, iotype="in", desc='Solar Reflectance Index') #
q_per_area_solar = Float(350., units = 'W/m**2', desc='Solar Heat Rate Absorbed per Area') #
q_total_solar = Float(375989751., iotype="in", units = 'W', desc='Solar Heat Absorbed by Tube') #
    emissivity_tube = Float(0.5, iotype="in", desc='Emissivity of the tube (dimensionless)') #
sb_constant = Float(0.00000005670373, iotype="in", units = 'W/((m**2)*(K**4))', desc='Stefan-Boltzmann Constant') #
    Nu_multiplier = Float(1, iotype="in", desc="fudge factor on Nusselt number to account for small breeze on tube")
#--Outputs--
area_rad = Float(337486.1, units = 'm**2', iotype='out', desc='Tube Radiating Area') #
#Required for Natural Convection Calcs
    GrDelTL3 = Float(1946216.7, units = '1/((ft**3)*F)', iotype='out', desc='Grashof number per unit deltaT and length cubed') #
Pr = Float(0.707, iotype='out', desc='Prandtl') #
Gr = Float(12730351223., iotype='out', desc='Grashof #') #
Ra = Float(8996312085., iotype='out', desc='Rayleigh #') #
Nu = Float(232.4543713, iotype='out', desc='Nusselt #') #
k = Float(0.02655, units = 'W/(m*K)', iotype='out', desc='Thermal conductivity') #
    h = Float(0.845464094, units = 'W/((m**2)*K)', iotype='out', desc='Natural convection heat transfer coefficient') #
alpha = Float(2.487*(10**(-5)), units = '(m**2)/s', iotype='out', desc='Thermal diffusivity') #
k_visc = Float(1.798*(10**(-5)), units = '(m**2)/s', iotype='out', desc='Kinematic viscosity') #
film_temp = Float(310, units = 'K', iotype='out', desc='Film temperature') #
area_convection = Float(3374876.115, units = 'm**2', iotype='out', desc='Convection Area') #
#Natural Convection
    q_per_area_nat_conv = Float(7.9, units = 'W/(m**2)', iotype='out', desc='Heat convected to the outside per unit area') #
    total_q_nat_conv = Float(286900419., units = 'W', iotype='out', desc='Total heat released to the outside via natural convection') #
#Exhausted from Pods
    heat_rate_pod = Float(519763, units = 'W', iotype='out', desc='Heating due to a single pod') #
    total_heat_rate_pods = Float(17671942., units = 'W', iotype='out', desc='Heating due to all pods') #
#Radiated Out
q_rad_per_area = Float(31.6, units = 'W/(m**2)', iotype='out', desc='Heat Radiated to the outside') #
q_rad_tot = Float(106761066.5, units = 'W', iotype='out', desc='Heat Radiated to the outside') #
#Radiated In
viewing_angle = Float(1074256, units = 'm**2', iotype='out', desc='Effective Area hit by Sun') #
#Total Heating
q_total_out = Float(286900419., units = 'W', iotype='out', desc='Total Heat Released via Radiation and Natural Convection') #
q_total_in = Float(286900419., units = 'W', iotype='out', desc='Total Heat Absorbed/Added via Pods and Solar Absorption') #
#Residual (for solver)
    ss_temp_residual = Float(units = 'K', iotype='out', desc='Residual of Q_released - Q_absorbed')
failures = Float(0,iotype='out', desc='invalid run cases (temp goes negative)')
    def execute(self):
        """Calculate various parameters"""
#New Simple Compressor Calcs
self.inlet_Tt = self.temp_boundary*(1+0.2*self.pod_MN**2)
self.inlet_Pt = self.tube_P*(1+0.2*self.pod_MN**2)**3.5
self.exit_Tt = self.inlet_Tt*(1 + (1/self.compressor_adiabatic_eff)*(self.compPR**(1/3.5)-1) )
self.exit_Pt = self.inlet_Pt * self.compPR
if (self.exit_Tt<0):
self.failures +=1
#print self.temp_boundary, "invalid cases: ", self.failures
elif(self.exit_Tt<400):
self.cp_air = 990.8*self.exit_Tt**(0.00316)
else:
self.cp_air = 299.4*self.exit_Tt**(0.1962)
self.heat_rate_pod = self.Wdot*self.cp_air*(self.exit_Tt-self.temp_boundary)
#----
self.diameter_outer_tube = 2*self.radius_outer_tube
#bearing_q = cu(self.bearing_air.W,'lbm/s','kg/s') * cu(self.bearing_air.Cp,'Btu/(lbm*degR)','J/(kg*K)') * (cu(self.bearing_air.Tt,'degR','degK') - self.temp_boundary)
#nozzle_q = cu(self.nozzle_air.W,'lbm/s','kg/s') * cu(self.nozzle_air.Cp,'Btu/(lbm*degR)','J/(kg*K)') * (cu(self.nozzle_air.Tt,'degR','degK') - self.temp_boundary)
#Q = mdot * cp * deltaT
#self.heat_rate_pod = nozzle_q +bearing_q
#Total Q = Q * (number of pods)
self.total_heat_rate_pods = self.heat_rate_pod*self.num_pods
## Film Temp method
#(interp tables in Celsius)
self.film_temp = (self.temp_outside_ambient + self.temp_boundary)/2.
# # Berton Method
# #Determine thermal resistance of outside via Natural Convection or forced convection
# if(self.film_temp < 400):
# self.GrDelTL3 = 41780000000000000000*((self.film_temp)**(-4.639)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
# else:
# self.GrDelTL3 = 4985000000000000000*((self.film_temp)**(-4.284)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
# #Prandtl Number
        # #Pr = viscous diffusion rate / thermal diffusion rate = Cp * dynamic viscosity / thermal conductivity
# #Pr << 1 means thermal diffusivity dominates
# #Pr >> 1 means momentum diffusivity dominates
# if (self.film_temp < 400):
# self.Pr = 1.23*(self.film_temp**(-0.09685)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
# else:
# self.Pr = 0.59*(self.film_temp**(0.0239))
# #Grashof Number
# #Relationship between buoyancy and viscosity
# #Laminar = Gr < 10^8
# #Turbulent = Gr > 10^9
# self.Gr = self.GrDelTL3*abs(self.temp_boundary-self.film_temp)*(self.diameter_outer_tube**3) #JSG: Added abs incase subtraction goes negative
# #Rayleigh Number
# #Buoyancy driven flow (natural convection)
# self.Ra = self.Pr * self.Gr
self.k = float(k_interp(self.film_temp-273.15))
self.k_visc = float(nu_interp(self.film_temp-273.15))
self.alpha = float(alpha_interp(self.film_temp-273.15))
self.Pr = float(pr_interp(self.film_temp-273.15))
self.Ra = (9.81*(1/self.film_temp)* \
np.abs(self.temp_boundary - self.temp_outside_ambient) * \
(self.diameter_outer_tube**3) * self.Pr) / (self.k_visc**2)
#Nusselt Number
        #Nu = convective heat transfer / conductive heat transfer
if (self.Ra<=10**12): #valid in specific flow regime
self.Nu = self.Nu_multiplier*((0.6 + 0.387*self.Ra**(1./6.)/(1 + (0.559/self.Pr)**(9./16.))**(8./27.))**2) #3rd Ed. of Introduction to Heat Transfer by Incropera and DeWitt, equations (9.33) and (9.34) on page 465
else:
self.Nu = 232.4543713
# if(self.temp_outside_ambient < 400):
# self.k = 0.0001423*(self.temp_outside_ambient**(0.9138)) #SI units (https://mdao.grc.nasa.gov/publications/Berton-Thesis.pdf pg51)
# else:
# self.k = 0.0002494*(self.temp_outside_ambient**(0.8152))
#h = k*Nu/Characteristic Length
self.h = (self.k * self.Nu)/ self.diameter_outer_tube
#Convection Area = Surface Area
self.area_convection = pi * self.length_tube * self.diameter_outer_tube
#Determine heat radiated per square meter (Q)
self.q_per_area_nat_conv = self.h*(self.temp_boundary-self.temp_outside_ambient)
#Determine total heat radiated over entire tube (Qtotal)
self.total_q_nat_conv = self.q_per_area_nat_conv * self.area_convection
#Determine heat incoming via Sun radiation (Incidence Flux)
#Sun hits an effective rectangular cross section
        self.viewing_angle = self.length_tube * self.diameter_outer_tube
        self.q_per_area_solar = (1 - self.surface_reflectance) * self.c_solar * self.solar_insolation
        self.q_total_solar = self.q_per_area_solar * self.viewing_angle
#Determine heat released via radiation
#Radiative area = surface area
self.area_rad = self.area_convection
#P/A = SB*emmisitivity*(T^4 - To^4)
self.q_rad_per_area = self.sb_constant*self.emissivity_tube*((self.temp_boundary**4) - (self.temp_outside_ambient**4))
#P = A * (P/A)
self.q_rad_tot = self.area_rad * self.q_rad_per_area
#------------
#Sum Up
self.q_total_out = self.q_rad_tot + self.total_q_nat_conv
self.q_total_in = self.q_total_solar + self.total_heat_rate_pods
self.ss_temp_residual = (self.q_total_out - self.q_total_in)/1e6
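# Steady-state energy balance closed by the NewtonSolver in MiniHyperloop:
# heat released (radiation to ambient + natural convection over the tube surface)
# must equal heat absorbed (solar flux on the projected area + heat dumped by the
# pod compressors), so the solver drives ss_temp_residual to zero by varying
# temp_boundary, i.e. q_rad_tot + total_q_nat_conv == q_total_solar + total_heat_rate_pods.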
if __name__ == '__main__' and __package__ is None:
from os import sys, path #hack to import MC_Plot from sibling directory
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from plot.mc_histo import MC_Plot as mcp
hl_mc = HyperloopMonteCarlo()
#initial run to converge things
hl_mc.run()
#plot
if (True):
histo = mcp()
histo.plot('../output/therm_mc_%s.bson'%hl_mc.timestamp)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Database abstraction layer. Simplifies database
handling a bit.
An example of a common use case:
# Import the module
from databaselayer import database
# Create the database
myDB = database.Database('SQLite', 'database.sql')
# Create a table
myDB.execute(
'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username TEXT)'
)
# Insert a few people in the users table
myDB.insert('users', {'username': 'John'})
myDB.insert('users', {'username': 'Tom'})
"""
import threading
import sys
try:
import sqlite3
SQLITE = True
except ImportError:
# Fallback for sqlite3 (custom install)
try:
from pysqlite2 import dbapi2 as sqlite3
SQLITE = True
except ImportError:
SQLITE = False
try:
import MySQLdb
MYSQL = True
except ImportError:
MYSQL = False
class Database(threading.Thread):
"""
Higher level database abstraction layer.
Provides a database abstraction layer, for easy use with
multiple different database types, without the need to
think about SQL differences. If you want to execute raw SQL,
you can use the execute method.
Throughout the class, a lot of methods take in a filter argument.
The filter is in the format of {'field': 'value'}. The data
argument follows the same syntax.
The add argument is to add additional raw SQL to a constructed
query (e.g. add="ORDER BY time").
"""
def __init__(self, dbtype=None, dbname=None, dbserver=None, creden=None):
"""Sets the values for the database instance"""
threading.Thread.__init__(self)
try:
self.dbtype = dbtype
self.dbname = dbname
except NameError:
raise NameError('No database type or name specified!')
if dbserver is not None:
self.dbserver = dbserver
if creden is not None:
try:
self.user = creden['username']
except KeyError:
self.user = None
try:
self.passwd = creden['password']
except KeyError:
self.passwd = None
else:
self.user = None
self.passwd = None
self.temp_values = None
self.temp_insert_values = None
self.last_insert_id = None
self.conn = None
self.cursor = None
def connect(self):
"""Make the connection based on the type of database.
Types allowed:
SQLite
MySQL
"""
if SQLITE and self.dbtype == 'SQLite':
self.conn = sqlite3.connect(self.dbname)
self.cursor = self.conn.cursor()
elif MYSQL and self.dbtype == 'MySQL':
self.conn = MySQLdb.connect(host=self.dbserver, db=self.dbname,
user=self.user, passwd=self.passwd)
self.cursor = self.conn.cursor()
else:
raise NameError('No database available!')
def _keys_to_sql(self, keys=None, sep='AND '):
"""Construct the SQL filter from a dict"""
if keys is None:
keys = {}
filters = []
self.temp_values = ()
for field, value in list(keys.items()):
filters.append("%s = ? " % field)
self.temp_values = self.temp_values + (value,)
return sep.join(filters)
def _keys_to_insert_sql(self, keys=None, sep=', '):
"""Convert a dict into an SQL field value pair"""
if keys is None:
keys = {}
fields = []
values = []
self.temp_insert_values = ()
for field, value in list(keys.items()):
fields.append(field)
values.append('?')
self.temp_insert_values = self.temp_insert_values + (value,)
fields = '(' + sep.join(fields) + ') '
values = 'VALUES(' + sep.join(values) + ') '
return fields + values
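    # Illustrative example (hypothetical values): for keys={'username': 'John'},
    # _keys_to_sql returns        "username = ? "         with temp_values ('John',)
    # _keys_to_insert_sql returns "(username) VALUES(?) " with temp_insert_values ('John',)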
def execute(self, sql=None):
"""Simply execute the given SQL"""
if sql is not None:
self.connect()
try:
self.cursor.execute(sql)
except sqlite3.OperationalError as error:
self.conn.rollback()
return 'SQL Error: %s' % error
else:
self.conn.commit()
self.cursor.close()
else:
raise NameError('There was no SQL to be parsed')
def rawfetch(self, sql=None, data=None, fetchall=True, out='none'):
"""Fetches all rows from the given SQL.
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if sql is not None:
self.connect()
try:
if data is None:
self.cursor.execute(sql)
else:
self.cursor.execute(sql, tuple(data))
except sqlite3.OperationalError as error:
self.conn.rollback()
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
# Cleanup and return
if fetchall:
result = self.cursor.fetchall()
else:
result = self.cursor.fetchone()
self.cursor.close()
return result
else:
raise NameError('There was no SQL to be parsed')
def fetchall(self, table=None, filters=None, add='', out='none'):
"""Fetches all rows from database based on the filters applied.
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
append = ' WHERE '
if filters is None:
filters = {}
append = ''
if table is not None:
# Construct the SQL
sql = 'SELECT * FROM ' + table + append +\
self._keys_to_sql(filters)
self.connect()
try:
self.cursor.execute(sql + add, self.temp_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
# Cleanup and return
del self.temp_values
result = self.cursor.fetchall()
self.cursor.close()
return result
else:
raise NameError('Table not specified!')
def fetchone(self, table=None, filters=None, out='none'):
"""Fetches the first row from database based on the filters applied.
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if filters is None:
filters = {}
if table is not None:
# Construct the SQL
sql = 'SELECT * FROM ' + table + ' WHERE ' +\
self._keys_to_sql(filters)
self.connect()
try:
self.cursor.execute(sql, self.temp_values)
except sqlite3.OperationalError as error:
del self.temp_values
self.conn.rollback()
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
# Cleanup and return
del self.temp_values
result = self.cursor.fetchone()
self.cursor.close()
return result
else:
raise NameError('Table not specified!')
def insert(self, table=None, data=None, out=None):
"""
Inserts specified data into the database
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if data is None:
data = {}
if table is not None:
sql = 'INSERT INTO ' + table + self._keys_to_insert_sql(data)
self.connect()
try:
self.cursor.execute(sql, self.temp_insert_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_insert_values
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
write("With data : %s" % (self.temp_insert_values,))
del self.temp_insert_values
# TODO Fix the last insert id
# self.last_insert_id = self.cursor.lastrowid()
self.conn.commit()
self.cursor.close()
return True
else:
raise NameError('Table not specified!')
def update(self, table=None, data=None, filters=None, out=None):
"""
Updates rows where filters apply with, given data
Arg [out] specifies what the output should be:
none : do nothing here (simply return)
output : send output to stdout
"""
if data is None:
data = {}
if filters is None:
filters = {}
if table is not None:
values = []
data = self._keys_to_sql(data, sep=', ')
values = self.temp_values
if filters:
filters = ' WHERE ' + str(self._keys_to_sql(filters))
values = values + self.temp_values
else:
filters = ''
sql = 'UPDATE ' + table + ' SET ' + data + filters
self.connect()
try:
self.cursor.execute(sql, values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
if out == 'output':
write("Error running SQL: %s" % (sql,))
return 'SQL Error: %s' % error
else:
if out == 'output':
write("Successfully ran: %s" % (sql,))
del self.temp_values
# TODO Fix the last insert id
# self.last_insert_id = self.cursor.lastrowid()
self.conn.commit()
self.cursor.close()
return True
else:
raise NameError('Table not specified!')
def delete(self, table=None, filters=None):
"""Deletes rows where given filters apply"""
if filters is None:
filters = {}
if table is not None:
filters = self._keys_to_sql(filters)
sql = 'DELETE FROM ' + table + ' WHERE ' + filters
self.connect()
try:
self.cursor.execute(sql, self.temp_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
return 'SQL Error: %s' % error
else:
del self.temp_values
self.conn.commit()
self.cursor.close()
return True
else:
raise NameError('Table not specified!')
    def count(self, table=None, filters=None):
        """Counts the rows based on the given filters"""
        append = ' WHERE '
        if filters is None:
            filters = {}
            append = ''
        if table is not None:
            # Construct the SQL
            sql = 'SELECT * FROM ' + table + append + self._keys_to_sql(filters)
self.connect()
try:
self.cursor.execute(sql, self.temp_values)
except sqlite3.OperationalError as error:
self.conn.rollback()
del self.temp_values
return 'SQL Error: %s' % error
else:
# Cleanup and return
del self.temp_values
                # cursor.rowcount is unreliable for SELECT statements in sqlite3,
                # so count the fetched rows instead
                count = len(self.cursor.fetchall())
self.cursor.close()
if count < 0 or count is None:
count = 0
return count
else:
raise NameError('Table not specified!')
def write(text):
"""Handle the output from the IRC bot"""
text = str(text) + "\n"
sys.stdout.write(text)
sys.stdout.flush()
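# Minimal usage sketch (illustrative only: 'example.sql' and the 'users' table are
# hypothetical names, and sqlite3 is assumed to be available):
if __name__ == '__main__':
    db = Database('SQLite', 'example.sql')
    db.execute(
        'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username TEXT)'
    )
    db.insert('users', {'username': 'John'})
    db.update('users', {'username': 'Johnny'}, {'username': 'John'})
    write(db.fetchall('users', {'username': 'Johnny'}, add=' ORDER BY id'))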
|
|
import collections
from .base_array import base_array
from .composite import codec_kind, distance_to_next_multiply, struct_packed
from .descriptor import DescriptorField
from .exception import ProphyError
from .scalar import u32
from .six import long
class _generator_base(type):
"""
Base metaclass type intended to validate, supplement and create all
prophy_data_object classes.
"""
_slots = []
def __new__(mcs, name, bases, attrs):
attrs["__slots__"] = mcs._slots
return super(_generator_base, mcs).__new__(mcs, name, bases, attrs)
def __init__(cls, name, bases, attrs):
if not hasattr(cls, "_generated"):
cls._generated = True
cls._build_up_implementation()
super(_generator_base, cls).__init__(name, bases, attrs)
def _build_up_implementation(cls):
"""
Implementation of type creation. To be overridden in derived metaclasses.
"""
def __eq__(cls, other):
if not isinstance(other, cls.__class__):
return NotImplemented
def type_attr_eq(attr):
if hasattr(cls_d.type, attr) and hasattr(other_d.type, attr):
return getattr(cls_d.type, attr) == getattr(other_d.type, attr)
if cls.__bases__ != other.__bases__:
return False
if len(cls._descriptor) != len(other._descriptor):
return False
for cls_d, other_d in zip(cls._descriptor, other._descriptor):
if cls_d.name != other_d.name:
return False
if cls_d.discriminator != other_d.discriminator:
return False
if cls_d.type.__name__ == other_d.type.__name__ == '_array':
cls_d_type, other_d_type = cls_d.type._TYPE, other_d.type._TYPE
for f in ('_SIZE', '_BOUND', '_DYNAMIC', '_ALIGNMENT'):
if not type_attr_eq(f):
return False
if getattr(cls_d.type._TYPE, f) != getattr(other_d.type._TYPE, f):
return False
elif cls_d.type.__name__ == other_d.type.__name__ == 'container_len':
cls_d_type, other_d_type = cls_d.type.__name__, other_d.type.__name__
if not type_attr_eq('__bases__'):
return False
for f in ('_SIZE', '_BOUND', '_DYNAMIC', '_ALIGNMENT', '_TYPE'):
if not type_attr_eq(f):
return False
elif cls_d.type.__name__ == other_d.type.__name__ == '_bytes':
cls_d_type, other_d_type = cls_d.type.__name__, other_d.type.__name__
if not type_attr_eq('__bases__'):
return False
for f in ('_ALIGNMENT', '_BOUND', '_BOUND_SHIFT', '_DEFAULT', '_DYNAMIC', '_OPTIONAL', '_SIZE',
'_UNLIMITED'):
if not type_attr_eq(f):
return False
elif getattr(cls_d.type, '_OPTIONAL', False) == getattr(other_d.type, '_OPTIONAL', False) \
== True: # noqa: E712
cls_d_type, other_d_type = cls_d.type._optional_type, other_d.type._optional_type
else:
cls_d_type, other_d_type = cls_d.type, other_d.type
if cls_d_type != other_d_type:
return False
return True
def __ne__(cls, other):
are_equal = cls.__class__.__eq__(cls, other)
if are_equal is NotImplemented:
return NotImplemented
return not are_equal
class _composite_generator_base(_generator_base):
def _build_up_implementation(cls):
cls._descriptor = [DescriptorField(*field) for field in cls._descriptor]
cls.validate()
cls.add_attributes()
cls.extend_descriptor()
cls.add_properties()
cls.add_sizers()
def _types(cls):
for field in cls._descriptor:
yield field.type
def add_attributes(cls):
"""To be implemented in derived class."""
def extend_descriptor(cls):
for raw_item in cls._descriptor:
raw_item.evaluate_codecs()
def add_properties(cls):
"""To be implemented in derived class."""
def add_sizers(cls):
"""To be implemented in derived class."""
def validate(cls):
"""To be implemented in derived class."""
class enum_generator(_generator_base):
def _build_up_implementation(cls):
cls.validate()
cls.add_attributes()
def validate(cls):
for name, number in cls._enumerators:
if not isinstance(name, str):
msg = "enum ({}) member's first argument has to be string, got '{}'"
raise ProphyError(msg.format(cls.__name__, type(name).__name__))
if not isinstance(number, (int, long)):
msg = "enum member's ({}.{}) second argument has to be an integer, got '{}'"
raise ProphyError(msg.format(cls.__name__, name, type(number).__name__))
duplicates = ", ".join(_list_duplicates(name for name, _ in cls._enumerators))
if duplicates:
raise ProphyError("names overlap in '{}' enum, duplicates: {}".format(cls.__name__, duplicates))
def add_attributes(self):
def check(cls, value):
if isinstance(value, str):
value = name_to_int.get(value)
if value is None:
raise ProphyError("unknown enumerator name in {}".format(cls.__name__))
return cls(value)
elif isinstance(value, (int, long)):
if value not in int_to_name:
raise ProphyError("unknown enumerator {} value".format(cls.__name__))
return cls(value)
else:
raise ProphyError("neither string nor int")
name_to_int = {name: value for name, value in self._enumerators}
int_to_name = {value: name for name, value in self._enumerators}
list(map(self._check, (value for _, value in self._enumerators)))
self._DEFAULT = self(self._enumerators[0][1])
self._name_to_int = name_to_int
self._int_to_name = int_to_name
self._check = classmethod(check)
def __eq__(cls, other):
if not isinstance(other, cls.__class__):
return NotImplemented
return cls._enumerators == other._enumerators
class struct_generator(_composite_generator_base):
_slots = ["_fields"]
def validate(cls):
for field in cls._descriptor:
if not isinstance(field.name, str):
msg = "struct ({}) member's name must be a string type, got: '{}'"
raise ProphyError(msg.format(cls.__name__, type(field.name).__name__))
if not hasattr(field.type, "_is_prophy_object"):
msg = "struct member's ({}.{}) type must be a prophy object, is: {!r}"
raise ProphyError(msg.format(cls.__name__, field.name, field.type))
types = list(cls._types())
for type_ in types[:-1]:
if type_._UNLIMITED:
raise ProphyError("unlimited field is not the last one")
def add_attributes(cls):
cls._BOUND = None
cls._DYNAMIC = any(type_._DYNAMIC for type_ in cls._types())
cls._OPTIONAL = False
cls._PARTIAL_ALIGNMENT = None
cls._SIZE = sum((type_._OPTIONAL_SIZE if type_._OPTIONAL else type_._SIZE) for type_ in cls._types())
cls._UNLIMITED = any(type_._UNLIMITED for type_ in cls._types())
if not cls._descriptor:
cls._ALIGNMENT = 1
else:
cls._ALIGNMENT = max((t._OPTIONAL_ALIGNMENT if t._OPTIONAL else t._ALIGNMENT) for t in cls._types())
alignment = 1
for type_ in reversed(list(cls._types())):
if issubclass(type_, (base_array, bytes)) and type_._DYNAMIC:
type_._PARTIAL_ALIGNMENT = alignment
alignment = 1
alignment = max(type_._ALIGNMENT, alignment)
if not issubclass(cls, struct_packed) and cls._descriptor:
def get_padded_sizes():
types = list(cls._types())
sizes = [tp._SIZE for tp in types]
alignments = [tp._ALIGNMENT for tp in types[1:]] + [cls._ALIGNMENT]
offset = 0
for size, alignment in zip(sizes, alignments):
offset += size
padding = distance_to_next_multiply(offset, alignment)
offset += padding
yield padding
cls._SIZE += sum(get_padded_sizes())
def add_properties(cls):
for field in cls._descriptor:
if codec_kind.is_array(field.type):
cls.add_repeated_property(field)
elif codec_kind.is_composite(field.type):
cls.add_composite_property(field)
else:
cls.add_scalar_property(field)
def add_sizers(cls):
for field in cls._descriptor:
if codec_kind.is_array(field.type) or not codec_kind.is_struct(field.type):
if field.type._BOUND:
cls.substitute_len_field(field)
def add_repeated_property(cls, descriptor_field):
def getter(self):
value = self._fields.get(descriptor_field.name)
if value is None:
value = descriptor_field.type()
self._fields[descriptor_field.name] = value
return value
def setter(self, new_value):
raise ProphyError("assignment to array field not allowed")
setattr(cls, descriptor_field.name, property(getter, setter))
def add_scalar_property(cls, descriptor_field):
if descriptor_field.type._OPTIONAL:
def getter(self):
return self._fields.get(descriptor_field.name)
def setter(self, new_value):
if new_value is None:
self._fields[descriptor_field.name] = None
else:
self._fields[descriptor_field.name] = descriptor_field.type._check(new_value)
else:
def getter(self):
return self._fields.get(descriptor_field.name, descriptor_field.type._DEFAULT)
def setter(self, new_value):
self._fields[descriptor_field.name] = descriptor_field.type._check(new_value)
setattr(cls, descriptor_field.name, property(getter, setter))
def add_composite_property(cls, descriptor_field):
if descriptor_field.type._OPTIONAL:
def getter(self):
return self._fields.get(descriptor_field.name)
def setter(self, new_value):
if new_value is True:
self._fields[descriptor_field.name] = descriptor_field.type()
elif new_value is None:
self._fields.pop(descriptor_field.name, None)
else:
raise ProphyError("assignment to composite field not allowed")
else:
def getter(self):
value = self._fields.get(descriptor_field.name)
if value:
return value
else:
return self._fields.setdefault(descriptor_field.name, descriptor_field.type())
def setter(self, new_value):
raise ProphyError("assignment to composite field not allowed")
setattr(cls, descriptor_field.name, property(getter, setter))
def substitute_len_field(cls, container_item):
sizer_name = cls.validate_and_fix_sizer_name(container_item)
sizer_item = next(field for field in cls._descriptor if field.name == sizer_name)
bound_shift = container_item.type._BOUND_SHIFT
cls.validate_sizer_type(sizer_item, container_item)
if sizer_item.type.__name__ == "container_len":
sizer_item.type.add_bounded_container(container_item.name)
cls.validate_bound_shift(sizer_item.type, container_item.name, bound_shift)
else:
sizer_item.type = build_container_length_field(sizer_item.type, container_item.name, bound_shift)
sizer_item.evaluate_codecs()
delattr(cls, sizer_item.name)
def validate_and_fix_sizer_name(cls, container_item):
sizer_name = container_item.type._BOUND
items_before_sizer = cls._descriptor[:cls._descriptor.index(container_item)]
all_names = [field.name for field in cls._descriptor]
names_before = [field.name for field in items_before_sizer]
if sizer_name not in names_before:
if sizer_name in all_names:
msg = "Sizing member '{}' in '{}' must be placed before '{}' container."
raise ProphyError(msg.format(sizer_name, cls.__name__, container_item.name))
msg = "Sizing member '{}' of container '{}' not found in the object '{}'."
            msg = msg.format(sizer_name, container_item.name, cls.__name__)
            # Try to be lenient.
there_is_sizer_ending_with_s = (sizer_name + 's') in names_before
there_is_sizer_without_s = sizer_name.endswith('s') and sizer_name[:-1] in names_before
there_is_exactly_one_sizer = len([n for n in names_before if n.startswith("numOf")]) == 1
there_is_one_bound_array = len([f for f in cls._descriptor if f.type._BOUND]) == 1
if there_is_sizer_ending_with_s:
sizer_name += 's'
elif there_is_sizer_without_s:
sizer_name = sizer_name[:-1]
elif there_is_exactly_one_sizer and there_is_one_bound_array:
sizer_name = next(f for f in items_before_sizer if f.name.startswith("numOf")).name
else:
raise ProphyError(msg)
container_item.type._BOUND = sizer_name
print("Warning: {}\n Picking '{}' as the missing sizer instead.\n".format(msg, sizer_name))
return sizer_name
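    # Leniency example (hypothetical field names): if an array declares its bound as
    # "numOfItem" but the struct actually defines "numOfItems" earlier in the
    # descriptor, the trailing-'s' variant is picked, _BOUND is patched accordingly
    # and a warning is printed instead of raising ProphyError.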
def validate_sizer_type(cls, sizer_item, container_item):
if sizer_item.type._OPTIONAL:
msg = "array {}.{} must not be bound to optional field"
raise ProphyError(msg.format(cls.__name__, container_item.name))
if not issubclass(sizer_item.type, (int, long)):
msg = "array {}.{} must be bound to an unsigned integer"
raise ProphyError(msg.format(cls.__name__, container_item.name))
def validate_bound_shift(cls, sizer_item_type, container_name, expected_bound_shift):
msg = "Different bound shifts are unsupported in externally sized arrays ({}.{})"
for field in cls._descriptor:
if field.name in sizer_item_type._BOUND:
if not field.type._BOUND_SHIFT == expected_bound_shift:
raise ProphyError(msg.format(cls.__name__, container_name))
def build_container_length_field(sizer_item_type, container_name, bound_shift):
class container_len(sizer_item_type):
_BOUND = [container_name]
_TYPE = sizer_item_type
@classmethod
def add_bounded_container(cls, cont_name):
cls._BOUND.append(cont_name)
@classmethod
def evaluate_size(cls, parent):
sizes = set(len(getattr(parent, c_name)) for c_name in cls._BOUND)
if len(sizes) != 1:
msg = "Size mismatch of arrays in {}: {}"
raise ProphyError(msg.format(parent.__class__.__name__, ", ".join(cls._BOUND)))
return sizes.pop()
@staticmethod
def _encode(value, endianness):
return sizer_item_type._encode(value + bound_shift, endianness)
@staticmethod
def _decode(data, pos, endianness):
value, size = sizer_item_type._decode(data, pos, endianness)
array_guard = 65536
if value > array_guard:
raise ProphyError("decoded array length over %s" % array_guard)
value -= bound_shift
if value < 0:
raise ProphyError("decoded array length smaller than shift")
return value, size
return container_len
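# Wire-format example (illustrative): with bound_shift=1, a 3-element array is
# encoded with a length field of 4, and a decoded length of 4 is reported back as 3;
# decoded lengths above 65536 or smaller than the shift raise ProphyError.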
class union_generator(_composite_generator_base):
_slots = ["_fields", "_discriminated"]
def validate(cls):
for type_ in cls._types():
if type_._DYNAMIC:
raise ProphyError("dynamic types not allowed in union")
if type_._BOUND:
raise ProphyError("bound array/bytes not allowed in union")
if issubclass(type_, base_array):
raise ProphyError("static array not implemented in union")
if type_._OPTIONAL:
raise ProphyError("union with optional field disallowed")
def add_attributes(cls):
cls._ALIGNMENT = max(u32._ALIGNMENT, max(type_._ALIGNMENT for type_ in cls._types()))
cls._BOUND = None
cls._DYNAMIC = False
cls._OPTIONAL = False
cls._PARTIAL_ALIGNMENT = None
natural_size = cls._ALIGNMENT + max(type_._SIZE for type_ in cls._types())
cls._SIZE = natural_size + distance_to_next_multiply(natural_size, cls._ALIGNMENT)
cls._UNLIMITED = False
cls._discriminator_type = u32
def add_properties(cls):
cls.add_union_discriminator_property()
for field in cls._descriptor:
if codec_kind.is_composite(field.type):
cls.add_union_composite_property(field)
else:
cls.add_union_scalar_property(field)
def add_union_discriminator_property(cls):
def getter(self):
return self._discriminated.discriminator
def setter(self, discriminator_name_or_value):
for field in self._descriptor:
if discriminator_name_or_value in (field.name, field.discriminator):
if field != self._discriminated:
self._discriminated = field
self._fields = {}
return
raise ProphyError("unknown discriminator: {!r}".format(discriminator_name_or_value))
setattr(cls, "discriminator", property(getter, setter))
def add_union_composite_property(cls, field):
def getter(self):
if self._discriminated is not field:
raise ProphyError("currently field %s is discriminated" % self._discriminated.discriminator)
value = self._fields.get(field.name)
if value is None:
value = field.type()
value = self._fields.setdefault(field.name, value)
return value
def setter(self, new_value):
raise ProphyError("assignment to composite field not allowed")
setattr(cls, field.name, property(getter, setter))
def add_union_scalar_property(cls, field):
def getter(self):
if self._discriminated is not field:
raise ProphyError("currently field %s is discriminated" % self._discriminated.discriminator)
return self._fields.get(field.name, field.type._DEFAULT)
def setter(self, new_value):
if self._discriminated is not field:
raise ProphyError("currently field %s is discriminated" % self._discriminated.discriminator)
new_value = field.type._check(new_value)
self._fields[field.name] = new_value
setattr(cls, field.name, property(getter, setter))
def _list_duplicates(iterable):
iterable = list(iterable)
return sorted((collections.Counter(iterable) - collections.Counter(set(iterable))).keys())
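# e.g. _list_duplicates(["a", "b", "a", "c", "b"]) returns ["a", "b"]: the sorted
# names that occur more than once, used above to reject enums with repeated members.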
|
|
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# trial imports
from twisted.internet import defer
from tests import unittest
# python imports
from mock import Mock, ANY
from ..utils import MockHttpResource, MockClock, setup_test_homeserver
from synapse.federation import initialize_http_replication
from synapse.events import FrozenEvent
def make_pdu(prev_pdus=[], **kwargs):
"""Provide some default fields for making a PduTuple."""
pdu_fields = {
"state_key": None,
"prev_events": prev_pdus,
}
pdu_fields.update(kwargs)
return FrozenEvent(pdu_fields)
class FederationTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.mock_resource = MockHttpResource()
self.mock_http_client = Mock(spec=[
"get_json",
"put_json",
])
self.mock_persistence = Mock(spec=[
"prep_send_transaction",
"delivered_txn",
"get_received_txn_response",
"set_received_txn_response",
"get_destination_retry_timings",
"get_auth_chain",
])
self.mock_persistence.get_received_txn_response.return_value = (
defer.succeed(None)
)
retry_timings_res = {
"destination": "",
"retry_last_ts": 0,
"retry_interval": 0,
}
self.mock_persistence.get_destination_retry_timings.return_value = (
defer.succeed(retry_timings_res)
)
self.mock_persistence.get_auth_chain.return_value = []
self.clock = MockClock()
hs = yield setup_test_homeserver(
resource_for_federation=self.mock_resource,
http_client=self.mock_http_client,
datastore=self.mock_persistence,
clock=self.clock,
keyring=Mock(),
)
self.federation = initialize_http_replication(hs)
self.distributor = hs.get_distributor()
@defer.inlineCallbacks
def test_get_state(self):
mock_handler = Mock(spec=[
"get_state_for_pdu",
])
self.federation.set_handler(mock_handler)
mock_handler.get_state_for_pdu.return_value = defer.succeed([])
# Empty context initially
(code, response) = yield self.mock_resource.trigger(
"GET",
"/_matrix/federation/v1/state/my-context/",
None
)
self.assertEquals(200, code)
self.assertFalse(response["pdus"])
# Now lets give the context some state
mock_handler.get_state_for_pdu.return_value = (
defer.succeed([
make_pdu(
event_id="the-pdu-id",
origin="red",
user_id="@a:red",
room_id="my-context",
type="m.topic",
origin_server_ts=123456789000,
depth=1,
content={"topic": "The topic"},
state_key="",
power_level=1000,
prev_state="last-pdu-id",
),
])
)
(code, response) = yield self.mock_resource.trigger(
"GET",
"/_matrix/federation/v1/state/my-context/",
None
)
self.assertEquals(200, code)
self.assertEquals(1, len(response["pdus"]))
@defer.inlineCallbacks
def test_get_pdu(self):
mock_handler = Mock(spec=[
"get_persisted_pdu",
])
self.federation.set_handler(mock_handler)
mock_handler.get_persisted_pdu.return_value = (
defer.succeed(None)
)
(code, response) = yield self.mock_resource.trigger(
"GET",
"/_matrix/federation/v1/event/abc123def456/",
None
)
self.assertEquals(404, code)
# Now insert such a PDU
mock_handler.get_persisted_pdu.return_value = (
defer.succeed(
make_pdu(
event_id="abc123def456",
origin="red",
user_id="@a:red",
room_id="my-context",
type="m.text",
origin_server_ts=123456789001,
depth=1,
content={"text": "Here is the message"},
)
)
)
(code, response) = yield self.mock_resource.trigger(
"GET",
"/_matrix/federation/v1/event/abc123def456/",
None
)
self.assertEquals(200, code)
self.assertEquals(1, len(response["pdus"]))
self.assertEquals("m.text", response["pdus"][0]["type"])
@defer.inlineCallbacks
def test_send_pdu(self):
self.mock_http_client.put_json.return_value = defer.succeed(
(200, "OK")
)
pdu = make_pdu(
event_id="abc123def456",
origin="red",
user_id="@a:red",
room_id="my-context",
type="m.text",
origin_server_ts=123456789001,
depth=1,
content={"text": "Here is the message"},
)
yield self.federation.send_pdu(pdu, ["remote"])
self.mock_http_client.put_json.assert_called_with(
"remote",
path="/_matrix/federation/v1/send/1000000/",
data={
"origin_server_ts": 1000000,
"origin": "test",
"pdus": [
pdu.get_pdu_json(),
],
'pdu_failures': [],
},
json_data_callback=ANY,
long_retries=True,
)
@defer.inlineCallbacks
def test_send_edu(self):
self.mock_http_client.put_json.return_value = defer.succeed(
(200, "OK")
)
yield self.federation.send_edu(
destination="remote",
edu_type="m.test",
content={"testing": "content here"},
)
# MockClock ensures we can guess these timestamps
self.mock_http_client.put_json.assert_called_with(
"remote",
path="/_matrix/federation/v1/send/1000000/",
data={
"origin": "test",
"origin_server_ts": 1000000,
"pdus": [],
"edus": [
{
"edu_type": "m.test",
"content": {"testing": "content here"},
}
],
'pdu_failures': [],
},
json_data_callback=ANY,
long_retries=True,
)
@defer.inlineCallbacks
def test_recv_edu(self):
recv_observer = Mock()
recv_observer.return_value = defer.succeed(())
self.federation.register_edu_handler("m.test", recv_observer)
yield self.mock_resource.trigger(
"PUT",
"/_matrix/federation/v1/send/1001000/",
"""{
"origin": "remote",
"origin_server_ts": 1001000,
"pdus": [],
"edus": [
{
"origin": "remote",
"destination": "test",
"edu_type": "m.test",
"content": {"testing": "reply here"}
}
]
}"""
)
recv_observer.assert_called_with(
"remote", {"testing": "reply here"}
)
@defer.inlineCallbacks
def test_send_query(self):
self.mock_http_client.get_json.return_value = defer.succeed(
{"your": "response"}
)
response = yield self.federation.make_query(
destination="remote",
query_type="a-question",
args={"one": "1", "two": "2"},
)
self.assertEquals({"your": "response"}, response)
self.mock_http_client.get_json.assert_called_with(
destination="remote",
path="/_matrix/federation/v1/query/a-question",
args={"one": "1", "two": "2"},
retry_on_dns_fail=True,
)
@defer.inlineCallbacks
def test_recv_query(self):
recv_handler = Mock()
recv_handler.return_value = defer.succeed({"another": "response"})
self.federation.register_query_handler("a-question", recv_handler)
code, response = yield self.mock_resource.trigger(
"GET",
"/_matrix/federation/v1/query/a-question?three=3&four=4",
None
)
self.assertEquals(200, code)
self.assertEquals({"another": "response"}, response)
recv_handler.assert_called_with(
{"three": "3", "four": "4"}
)
|
|
#{% macro render_field( field ) %}
# <dt>{{ field.name }}</dt>
# <dd>{{ field(**kwargs)|safe }}
# {% if field.errors %}
# <ul class=errors>
#        {% for error in field.errors %}
#          <li> {{ error }} </li>
# {% endfor %}
# </ul>
# {% endif %}
# </dd>
#{% endmacro %}
#
#
#<form method=post action="/configure">
# <dl>
# {% for field in g.formtoreq %}
# {{ render_field(field) }}
# {% endfor %}
# </dl>
#</form>
#from flask.ext.wtf import Form, SubmitField, BooleanField, FormField, FieldList, TextField, TextAreaField, IntegerField, validators, SelectMultipleField, Required
#
#class subProjectDescription(Form):
# baseFolder = TextField("baseFolder")
# name = TextField("name")
# projtype = IntegerField("project type")
# folders = TextAreaField()
# methodol = TextField("name")
#
#
#class setupForm(Form):
# submit = SubmitField("Submit")
# # BEHAVIOR
# DoAdaptamers = BooleanField("Clean 454 Adaptamers?", description="For the 454 data, should the adaptamers be cleaned?")
# Qborder = IntegerField("Trim at Q=" , description="which Q value to trim files")
# projectsToIgnore = SelectMultipleField("Projects to Ignore", [])
#
# projectsToIgnore.choices = [("reseq", "reseq")]
#
# # PROGRAMS
# baseFolder = TextField( "Base Folder", [validators.Length(min=1, max=256), validators.Required()], description="Base folder for all analysis")
# dbFile = TextField( "Cleaning DB", [validators.Length(min=1, max=256), validators.Required()], description="Database for cleaning")
#
# proj = FormField(subProjectDescription)
# #FILES
# "base": "/home/assembly/tomato150",
# "types": [
# "DENOVO",
# "MAPPING"
# ]
# },
# "contamClean": {
# "454": {
# "db": "/home/assembly/tomato150/scripts/pipeline/progs/contam/db/contamination_without_ecolidb_v0.2.fa",
# "threshold": 85
# },
# "illumina": {
# "db": "/home/assembly/tomato150/scripts/pipeline/progs/contam/db/contamination_without_ecolidb_v0.2.fa",
# "threshold": 95
# }
# },
# "currAbsPath": "",
# "currFtpPath": "/home/assembly/tomato150/scripts/ftp",
# "debug":false,
# "does": {
# "contamination":true,
# "fastqc":true,
# "hash":true,
# "quals":true
# },
# "dscFileName": "/home/assembly/tomato150/project_description.csv",
# "exportToFile":true,
# "filter454": {
# "compressHomopolymerSize": 1,
# "filterDuplicates": 1,
# "maxCompressedSize": 850,
# "maxNs": 1,
# "minCompressedSize": 50,
# "seedLength": 50,
# "trim5": 0.1
# },
# "folder": [
# "/home/assembly/tomato150",
# "reseq",
# 1,
# [
# "001",
# "002",
# "003",
# "004",
# "005",
# "006",
# "007",
# "008",
# "011",
# "012",
# "013",
# "014",
# "015",
# "016",
# "017",
# "018",
# "019",
# "020",
# "021",
# "022",
# "023",
# "024",
# "026",
# "027",
# "028",
# "029",
# "030",
# "031",
# "032",
# "033",
# "034",
# "035",
# "036",
# "037",
# "038",
# "039",
# "040",
# "041",
# "077",
# "078",
# "088",
# "089",
# "090",
# "091",
# "093",
# "094",
# "096",
# "097",
# "102",
# "103",
# "105",
# "025",
# "042",
# "043",
# "044",
# "045",
# "046",
# "047",
# "049",
# "051",
# "052",
# "053",
# "054",
# "055",
# "056",
# "057",
# "058",
# "059",
# "060",
# "062",
# "063",
# "064",
# "065",
# "066",
# "067",
# "068",
# "069",
# "070",
# "071",
# "072",
# "073",
# "074",
# "075",
# "104naturalis_Moneymaker-CF4N705",
# "naturalis_Moneymaker-CF4N706",
# "naturalis_Moneymaker-oldN703",
# "naturalis_Moneymaker-oldN704",
# "naturalis_Slyc-17N701",
# "naturalis_Slyc-17N702"
# ],
# [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# false,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "mapped"
# ]
# ],
# "folderType": 1,
# "folders": [
# [
# "/home/assembly/tomato150/denovo",
# "arcanum",
# 0,
# null,
# [
# [
# "illumina",
# [
# [
# "pairedend_170",
# "PE",
# "170"
# ],
# [
# "matepair_2000",
# "MP",
# "2000"
# ]
# ]
# ],
# [
# "454",
# [
# [
# "8000",
# "MP",
# "8000"
# ],
# [
# "20000",
# "MP",
# "20000"
# ],
# [
# "shotgun",
# "WGS",
# null
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "assembled"
# ]
# ],
# [
# "/home/assembly/tomato150/denovo",
# "habrochaites",
# 0,
# null,
# [
# [
# "illumina",
# [
# [
# "pairedend_170",
# "PE",
# "170"
# ],
# [
# "matepair_2000",
# "MP",
# "2000"
# ]
# ]
# ],
# [
# "454",
# [
# [
# "8000",
# "MP",
# "8000"
# ],
# [
# "20000",
# "MP",
# "20000"
# ],
# [
# "shotgun",
# "WGS",
# null
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "assembled"
# ]
# ],
# [
# "/home/assembly/tomato150/denovo",
# "pennellii",
# 0,
# null,
# [
# [
# "illumina",
# [
# [
# "pairedend_170",
# "PE",
# "170"
# ],
# [
# "matepair_2000",
# "MP",
# "2000"
# ]
# ]
# ],
# [
# "454",
# [
# [
# "8000",
# "MP",
# "8000"
# ],
# [
# "20000",
# "MP",
# "20000"
# ],
# [
# "shotgun",
# "WGS",
# null
# ],
# [
# "3000",
# "MP",
# "3000"
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "assembled"
# ]
# ],
# [
# "/home/assembly/tomato150",
# "ril",
# 1,
# [
# "601",
# "603",
# "608",
# "609",
# "610",
# "611",
# "612",
# "614",
# "615",
# "618",
# "619",
# "622",
# "623",
# "624",
# "625",
# "626",
# "630",
# "631",
# "634",
# "639",
# "643",
# "644",
# "646",
# "648",
# "649",
# "651",
# "653",
# "654",
# "656",
# "658",
# "659",
# "660",
# "665",
# "666",
# "667",
# "668",
# "669",
# "670",
# "674",
# "675",
# "676",
# "678",
# "679",
# "682",
# "684",
# "685",
# "688",
# "691",
# "692",
# "693",
# "694",
# "696",
# "697",
# "701",
# "702",
# "705",
# "706",
# "707",
# "710",
# "711"
# ],
# [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "mapped"
# ]
# ],
# [
# "/home/assembly/tomato150",
# "reseq",
# 1,
# [
# "001",
# "002",
# "003",
# "004",
# "005",
# "006",
# "007",
# "008",
# "011",
# "012",
# "013",
# "014",
# "015",
# "016",
# "017",
# "018",
# "019",
# "020",
# "021",
# "022",
# "023",
# "024",
# "026",
# "027",
# "028",
# "029",
# "030",
# "031",
# "032",
# "033",
# "034",
# "035",
# "036",
# "037",
# "038",
# "039",
# "040",
# "041",
# "077",
# "078",
# "088",
# "089",
# "090",
# "091",
# "093",
# "094",
# "096",
# "097",
# "102",
# "103",
# "105",
# "025",
# "042",
# "043",
# "044",
# "045",
# "046",
# "047",
# "049",
# "051",
# "052",
# "053",
# "054",
# "055",
# "056",
# "057",
# "058",
# "059",
# "060",
# "062",
# "063",
# "064",
# "065",
# "066",
# "067",
# "068",
# "069",
# "070",
# "071",
# "072",
# "073",
# "074",
# "075",
# "104naturalis_Moneymaker-CF4N705",
# "naturalis_Moneymaker-CF4N706",
# "naturalis_Moneymaker-oldN703",
# "naturalis_Moneymaker-oldN704",
# "naturalis_Slyc-17N701",
# "naturalis_Slyc-17N702"
# ],
# [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# false,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "mapped"
# ]
# ]
# ],
# "genomeSize": 950000000,
# "ignoreDb":false,
# "jellyfishParams": {
# "_extra": "--both-strands",
# "counter-len": 7,
# "high": 300,
# "increment": 1,
# "low": 1,
# "lower-count": 1,
# "mer-len": 19,
# "out-buffer-size": 800000000,
# "out-counter-len": 4,
# "size": 800000000,
# "threads": 8
# },
# "key": "quakecutoff",
# "loadLocalDbs":true,
# "mappingLibs": [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# "maxThreads": 1,
# "mergePdfs":true,
# "nameLength": 12,
# "projLength": 6,
# "quakeParams": {
# "--no_cut":false,
# "--ratio": 800,
# "-p": 10
# },
# "quakecutoff": {
# "arcanum": 4,
# "habrochaites": 4,
# "pennellii": 4
# },
# "quakeignore": [
# "ril",
# "reseq"
# ],
# "redo": {
# "compression":false,
# "contamination":false,
# "fastqc":false,
# "hash":false,
# "info":false,
# "quals":false
# },
# "redoReport":false,
# "replaceFiles":true,
# "reseqSubs": [
# "001",
# "002",
# "003",
# "004",
# "005",
# "006",
# "007",
# "008",
# "011",
# "012",
# "013",
# "014",
# "015",
# "016",
# "017",
# "018",
# "019",
# "020",
# "021",
# "022",
# "023",
# "024",
# "026",
# "027",
# "028",
# "029",
# "030",
# "031",
# "032",
# "033",
# "034",
# "035",
# "036",
# "037",
# "038",
# "039",
# "040",
# "041",
# "077",
# "078",
# "088",
# "089",
# "090",
# "091",
# "093",
# "094",
# "096",
# "097",
# "102",
# "103",
# "105",
# "025",
# "042",
# "043",
# "044",
# "045",
# "046",
# "047",
# "049",
# "051",
# "052",
# "053",
# "054",
# "055",
# "056",
# "057",
# "058",
# "059",
# "060",
# "062",
# "063",
# "064",
# "065",
# "066",
# "067",
# "068",
# "069",
# "070",
# "071",
# "072",
# "073",
# "074",
# "075",
# "104naturalis_Moneymaker-CF4N705",
# "naturalis_Moneymaker-CF4N706",
# "naturalis_Moneymaker-oldN703",
# "naturalis_Moneymaker-oldN704",
# "naturalis_Slyc-17N701",
# "naturalis_Slyc-17N702"
# ],
# "rilSubs": [
# "601",
# "603",
# "608",
# "609",
# "610",
# "611",
# "612",
# "614",
# "615",
# "618",
# "619",
# "622",
# "623",
# "624",
# "625",
# "626",
# "630",
# "631",
# "634",
# "639",
# "643",
# "644",
# "646",
# "648",
# "649",
# "651",
# "653",
# "654",
# "656",
# "658",
# "659",
# "660",
# "665",
# "666",
# "667",
# "668",
# "669",
# "670",
# "674",
# "675",
# "676",
# "678",
# "679",
# "682",
# "684",
# "685",
# "688",
# "691",
# "692",
# "693",
# "694",
# "696",
# "697",
# "701",
# "702",
# "705",
# "706",
# "707",
# "710",
# "711"
# ],
# "runFastqc":false,
# "runGenData":true,
# "runJellyfish":true,
# "runQuake":true,
# "runSolexaqa":false,
# "samplesToIgnore": [],
# "skip454":false,
# "skipIllumina":false,
# "sleepWhileWaiting": 10,
# "startTime": "20121215021329",
# "status": [
# "filtered",
# "FILTERED",
# "docs",
# "filtered"
# ],
# "programs": {
# "dymTrim": {
# "exeDynamicTrim": "perl /mnt/nexenta/assembly/nobackup/dev_150/scripts/pipeline/progs/solexaqa/DynamicTrim.pl",
# "exeLengthSort": "perl /mnt/nexenta/assembly/nobackup/dev_150/scripts/pipeline/progs/solexaqa/LengthSort.pl",
# "tmp": "/mnt/nexenta/assembly/nobackup/tmp"
# },
# "fastqCount": {
# "exe": "/home/aflit001/bin/fastqCount",
# "tmp": "/run/shm"
# },
# "fastqScreen": {
# "exe": "fastq_screen",
# "subset": 5000,
# "threads": 8
# },
# "fastqc": {
# "exe": "perl /home/assembly/tomato150/scripts/pipeline/progs/FastQC/fastqc",
# "threads": 4
# },
# "filter454": {
# "exeAnalyze": "python /mnt/nexenta/assembly/nobackup/dev_150/scripts/pipeline/progs/filter454/analyze454Reads.py",
# "exeFilter": "python /mnt/nexenta/assembly/nobackup/dev_150/scripts/pipeline/progs/filter454/filter454Reads.py",
# "exeFq2Fa": "/home/assembly/bin/fastq_to_fasta",
# "exeSffFile": "/opt/454/2.6_1/bin/sfffile",
# "exeSffInfo": "/opt/454/2.6_1/bin/sffinfo",
# "tmp": "/run/shm"
# },
# "jellyfish": {
# "exe": "jellyfish",
# "pv": " | pv --buffer-size 16M -q ",
# "tmp": "/run/shm"
# },
# "mkplot": {
# "miY": 3,
# "q": 80
# },
# "quake": {
# "exe": "python progs/Quake/bin/quake.py",
# "tmp": "/home/assembly/tmp"
# },
# "sffExtract": {
# "exeFastaAndQualMerge": "python /home/assembly/tomato150/scripts/pipeline/progs/fastqmergefastaandqual.py",
# "exeSffFile": "/opt/454/2.6_1/bin/sfffile",
# "exeSffInfo": "/opt/454/2.6_1/bin/sffinfo"
# },
# "solexaqa": {
# "exe": "perl /home/assembly/tomato150/scripts/pipeline/progs/solexaqa/SolexaQA.pl"
# },
# "zip": {
# "exe": "pigz"
# }
# },
# "pv": " | pv --buffer-size 16M -q ",
# "sffExt": [
# "/opt/454/2.6_1/bin/sffinfo -s %(in)s | pv --buffer-size 16M -q > %(out)s.fasta",
# "/opt/454/2.6_1/bin/sffinfo -q %(in)s | pv --buffer-size 16M -q > %(out)s.fasta.qual",
# "python /home/assembly/tomato150/scripts/pipeline/progs/fastqmergefastaandqual.py %(out)s.fasta %(out)s.fasta.qual %(out)s",
# "rm -f %(out)s.fasta %(out)s.fasta.qual"
# ],
# "contamClean": {
# "454": {
# "db": "/home/assembly/tomato150/scripts/pipeline/progs/contam/db/contamination_without_ecolidb_v0.2.fa",
# "threshold": 85
# },
# "illumina": {
# "db": "/home/assembly/tomato150/scripts/pipeline/progs/contam/db/contamination_without_ecolidb_v0.2.fa",
# "threshold": 95
# }
# },
# "currAbsPath": "",
# "currFtpPath": "/home/assembly/tomato150/scripts/ftp",
# "debug":false,
# "does": {
# "contamination":true,
# "fastqc":true,
# "hash":true,
# "quals":true
# },
# "dscFileName": "/home/assembly/tomato150/project_description.csv",
# "exportToFile":true,
# "filter454": {
# "compressHomopolymerSize": 1,
# "filterDuplicates": 1,
# "maxCompressedSize": 850,
# "maxNs": 1,
# "minCompressedSize": 50,
# "seedLength": 50,
# "trim5": 0.1
# },
# "folder": [
# "/home/assembly/tomato150",
# "reseq",
# 1,
# [
# "001",
# "002",
# "003",
# "004",
# "005",
# "006",
# "007",
# "008",
# "011",
# "012",
# "013",
# "014",
# "015",
# "016",
# "017",
# "018",
# "019",
# "020",
# "021",
# "022",
# "023",
# "024",
# "026",
# "027",
# "028",
# "029",
# "030",
# "031",
# "032",
# "033",
# "034",
# "035",
# "036",
# "037",
# "038",
# "039",
# "040",
# "041",
# "077",
# "078",
# "088",
# "089",
# "090",
# "091",
# "093",
# "094",
# "096",
# "097",
# "102",
# "103",
# "105",
# "025",
# "042",
# "043",
# "044",
# "045",
# "046",
# "047",
# "049",
# "051",
# "052",
# "053",
# "054",
# "055",
# "056",
# "057",
# "058",
# "059",
# "060",
# "062",
# "063",
# "064",
# "065",
# "066",
# "067",
# "068",
# "069",
# "070",
# "071",
# "072",
# "073",
# "074",
# "075",
# "104naturalis_Moneymaker-CF4N705",
# "naturalis_Moneymaker-CF4N706",
# "naturalis_Moneymaker-oldN703",
# "naturalis_Moneymaker-oldN704",
# "naturalis_Slyc-17N701",
# "naturalis_Slyc-17N702"
# ],
# [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# false,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "mapped"
# ]
# ],
# "folderType": 1,
# "folders": [
# [
# "/home/assembly/tomato150/denovo",
# "arcanum",
# 0,
# null,
# [
# [
# "illumina",
# [
# [
# "pairedend_170",
# "PE",
# "170"
# ],
# [
# "matepair_2000",
# "MP",
# "2000"
# ]
# ]
# ],
# [
# "454",
# [
# [
# "8000",
# "MP",
# "8000"
# ],
# [
# "20000",
# "MP",
# "20000"
# ],
# [
# "shotgun",
# "WGS",
# null
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "assembled"
# ]
# ],
# [
# "/home/assembly/tomato150/denovo",
# "habrochaites",
# 0,
# null,
# [
# [
# "illumina",
# [
# [
# "pairedend_170",
# "PE",
# "170"
# ],
# [
# "matepair_2000",
# "MP",
# "2000"
# ]
# ]
# ],
# [
# "454",
# [
# [
# "8000",
# "MP",
# "8000"
# ],
# [
# "20000",
# "MP",
# "20000"
# ],
# [
# "shotgun",
# "WGS",
# null
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "assembled"
# ]
# ],
# [
# "/home/assembly/tomato150/denovo",
# "pennellii",
# 0,
# null,
# [
# [
# "illumina",
# [
# [
# "pairedend_170",
# "PE",
# "170"
# ],
# [
# "matepair_2000",
# "MP",
# "2000"
# ]
# ]
# ],
# [
# "454",
# [
# [
# "8000",
# "MP",
# "8000"
# ],
# [
# "20000",
# "MP",
# "20000"
# ],
# [
# "shotgun",
# "WGS",
# null
# ],
# [
# "3000",
# "MP",
# "3000"
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "assembled"
# ]
# ],
# [
# "/home/assembly/tomato150",
# "ril",
# 1,
# [
# "601",
# "603",
# "608",
# "609",
# "610",
# "611",
# "612",
# "614",
# "615",
# "618",
# "619",
# "622",
# "623",
# "624",
# "625",
# "626",
# "630",
# "631",
# "634",
# "639",
# "643",
# "644",
# "646",
# "648",
# "649",
# "651",
# "653",
# "654",
# "656",
# "658",
# "659",
# "660",
# "665",
# "666",
# "667",
# "668",
# "669",
# "670",
# "674",
# "675",
# "676",
# "678",
# "679",
# "682",
# "684",
# "685",
# "688",
# "691",
# "692",
# "693",
# "694",
# "696",
# "697",
# "701",
# "702",
# "705",
# "706",
# "707",
# "710",
# "711"
# ],
# [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# true,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "mapped"
# ]
# ],
# [
# "/home/assembly/tomato150",
# "reseq",
# 1,
# [
# "001",
# "002",
# "003",
# "004",
# "005",
# "006",
# "007",
# "008",
# "011",
# "012",
# "013",
# "014",
# "015",
# "016",
# "017",
# "018",
# "019",
# "020",
# "021",
# "022",
# "023",
# "024",
# "026",
# "027",
# "028",
# "029",
# "030",
# "031",
# "032",
# "033",
# "034",
# "035",
# "036",
# "037",
# "038",
# "039",
# "040",
# "041",
# "077",
# "078",
# "088",
# "089",
# "090",
# "091",
# "093",
# "094",
# "096",
# "097",
# "102",
# "103",
# "105",
# "025",
# "042",
# "043",
# "044",
# "045",
# "046",
# "047",
# "049",
# "051",
# "052",
# "053",
# "054",
# "055",
# "056",
# "057",
# "058",
# "059",
# "060",
# "062",
# "063",
# "064",
# "065",
# "066",
# "067",
# "068",
# "069",
# "070",
# "071",
# "072",
# "073",
# "074",
# "075",
# "104naturalis_Moneymaker-CF4N705",
# "naturalis_Moneymaker-CF4N706",
# "naturalis_Moneymaker-oldN703",
# "naturalis_Moneymaker-oldN704",
# "naturalis_Slyc-17N701",
# "naturalis_Slyc-17N702"
# ],
# [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# false,
# [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp",
# "mapped"
# ]
# ]
# ],
# "genomeSize": 950000000,
# "ignoreDb":false,
# "jellyfishParams": {
# "_extra": "--both-strands",
# "counter-len": 7,
# "high": 300,
# "increment": 1,
# "low": 1,
# "lower-count": 1,
# "mer-len": 19,
# "out-buffer-size": 800000000,
# "out-counter-len": 4,
# "size": 800000000,
# "threads": 8
# },
# "key": "quakecutoff",
# "loadLocalDbs":true,
# "mappingLibs": [
# [
# "illumina",
# [
# [
# "pairedend_500",
# "PE",
# "500"
# ]
# ]
# ]
# ],
# "maxThreads": 1,
# "mergePdfs":true,
# "nameLength": 12,
# "projLength": 6,
# "projectsToIgnore": [
# "reseq"
# ],
# "quakeParams": {
# "--no_cut":false,
# "--ratio": 800,
# "-p": 10
# },
# "quakecutoff": {
# "arcanum": 4,
# "habrochaites": 4,
# "pennellii": 4
# },
# "quakeignore": [
# "ril",
# "reseq"
# ],
# "redo": {
# "compression":false,
# "contamination":false,
# "fastqc":false,
# "hash":false,
# "info":false,
# "quals":false
# },
# "redoReport":false,
# "replaceFiles":true,
# "reseqSubs": [
# "001",
# "002",
# "003",
# "004",
# "005",
# "006",
# "007",
# "008",
# "011",
# "012",
# "013",
# "014",
# "015",
# "016",
# "017",
# "018",
# "019",
# "020",
# "021",
# "022",
# "023",
# "024",
# "026",
# "027",
# "028",
# "029",
# "030",
# "031",
# "032",
# "033",
# "034",
# "035",
# "036",
# "037",
# "038",
# "039",
# "040",
# "041",
# "077",
# "078",
# "088",
# "089",
# "090",
# "091",
# "093",
# "094",
# "096",
# "097",
# "102",
# "103",
# "105",
# "025",
# "042",
# "043",
# "044",
# "045",
# "046",
# "047",
# "049",
# "051",
# "052",
# "053",
# "054",
# "055",
# "056",
# "057",
# "058",
# "059",
# "060",
# "062",
# "063",
# "064",
# "065",
# "066",
# "067",
# "068",
# "069",
# "070",
# "071",
# "072",
# "073",
# "074",
# "075",
# "104naturalis_Moneymaker-CF4N705",
# "naturalis_Moneymaker-CF4N706",
# "naturalis_Moneymaker-oldN703",
# "naturalis_Moneymaker-oldN704",
# "naturalis_Slyc-17N701",
# "naturalis_Slyc-17N702"
# ],
# "rilSubs": [
# "601",
# "603",
# "608",
# "609",
# "610",
# "611",
# "612",
# "614",
# "615",
# "618",
# "619",
# "622",
# "623",
# "624",
# "625",
# "626",
# "630",
# "631",
# "634",
# "639",
# "643",
# "644",
# "646",
# "648",
# "649",
# "651",
# "653",
# "654",
# "656",
# "658",
# "659",
# "660",
# "665",
# "666",
# "667",
# "668",
# "669",
# "670",
# "674",
# "675",
# "676",
# "678",
# "679",
# "682",
# "684",
# "685",
# "688",
# "691",
# "692",
# "693",
# "694",
# "696",
# "697",
# "701",
# "702",
# "705",
# "706",
# "707",
# "710",
# "711"
# ],
# "runFastqc":false,
# "runGenData":true,
# "runJellyfish":true,
# "runQuake":true,
# "runSolexaqa":false,
# "samplesToIgnore": [],
# "skip454":false,
# "skipIllumina":false,
# "sleepWhileWaiting": 10,
# "startTime": "20121215021329",
# "status": [
# "filtered",
# "FILTERED",
# "docs",
# "filtered"
# ],
# "statusFolder": "filtered",
# "statuses": [
# [
# "_prelim",
# "PRELIMINARY",
# "_prelim",
# "_prelim"
# ],
# [
# "_prefiltered",
# "PREFILTER",
# "_prefiltered",
# "_prefiltered"
# ],
# [
# "raw",
# "CHECKED",
# "docs",
# "raw"
# ],
# [
# "filtered",
# "FILTERED",
# "docs",
# "filtered"
# ]
# ],
# "statusesToClean": [
# "CHECKED"
# ],
# "structs": [
# "docs",
# "_prelim",
# "_prefiltered",
# "raw",
# "filtered",
# "_tmp"
# ],
# "tmpFolder": "_tmp",
# "trimFastq": {
# "-h": 20,
# "-l": 30
# }
#}
|
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import js2c
import multiprocessing
import optparse
import os
import random
import re
import shutil
import signal
import string
import subprocess
import sys
import time
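# Source files scanned for runtime function definitions, the regexes used to
# parse them, and the output directory for the generated mjsunit tests.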
FILENAME = "src/runtime.cc"
HEADERFILENAME = "src/runtime.h"
FUNCTION = re.compile("^RUNTIME_FUNCTION\(Runtime_(\w+)")
ARGSLENGTH = re.compile(".*DCHECK\(.*args\.length\(\) == (\d+)\);")
FUNCTIONEND = "}\n"
MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
FIRST_WORD = re.compile("^\s*(.*?)[\s({\[]")
WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen")
THIS_SCRIPT = os.path.relpath(sys.argv[0])
# Expand these macros, they define further runtime functions.
EXPAND_MACROS = [
"BUFFER_VIEW_GETTER",
"DATA_VIEW_GETTER",
"DATA_VIEW_SETTER",
"RUNTIME_UNARY_MATH",
]
# TODO(jkummerow): We could also whitelist the following macros, but the
# functions they define are so trivial that it's unclear how much benefit
# that would provide:
# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
# Counts of functions in each detection state. These are used to assert
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
EXPECTED_FUNCTION_COUNT = 429
EXPECTED_FUZZABLE_COUNT = 332
EXPECTED_CCTEST_COUNT = 7
EXPECTED_UNKNOWN_COUNT = 16
EXPECTED_BUILTINS_COUNT = 808
# Don't call these at all.
BLACKLISTED = [
"Abort", # Kills the process.
"AbortJS", # Kills the process.
"CompileForOnStackReplacement", # Riddled with DCHECK.
"IS_VAR", # Not implemented in the runtime.
"ListNatives", # Not available in Release mode.
"SetAllocationTimeout", # Too slow for fuzzing.
"SystemBreak", # Kills (int3) the process.
# These are weird. They violate some invariants when called after
# bootstrapping.
"DisableAccessChecks",
"EnableAccessChecks",
# The current LiveEdit implementation relies on and messes with internals
  # in ways that make it fundamentally unfuzzable :-(
"DebugGetLoadedScripts",
"DebugSetScriptSource",
"LiveEditFindSharedFunctionInfosForScript",
"LiveEditFunctionSourceUpdated",
"LiveEditGatherCompileInfo",
"LiveEditPatchFunctionPositions",
"LiveEditReplaceFunctionCode",
"LiveEditReplaceRefToNestedFunction",
"LiveEditReplaceScript",
"LiveEditRestartFrame",
"SetScriptBreakPoint",
# TODO(jkummerow): Fix these and un-blacklist them!
"CreateDateTimeFormat",
"CreateNumberFormat",
  # TODO(danno): Fix these internal functions that are only callable from stubs
# and un-blacklist them!
"NumberToString",
"RxegExpConstructResult",
"RegExpExec",
"StringAdd",
"SubString",
"StringCompare",
"StringCharCodeAt",
"GetFromCache",
# Compilation
"CompileUnoptimized",
"CompileOptimized",
"TryInstallOptimizedCode",
"NotifyDeoptimized",
"NotifyStubFailure",
# Utilities
"AllocateInNewSpace",
"AllocateInTargetSpace",
"AllocateHeapNumber",
"NumberToSmi",
"NumberToStringSkipCache",
"NewSloppyArguments",
"NewStrictArguments",
# Harmony
"CreateJSGeneratorObject",
"SuspendJSGeneratorObject",
"ResumeJSGeneratorObject",
"ThrowGeneratorStateError",
# Arrays
"ArrayConstructor",
"InternalArrayConstructor",
"NormalizeElements",
# Literals
"MaterializeRegExpLiteral",
"CreateObjectLiteral",
"CreateArrayLiteral",
"CreateArrayLiteralStubBailout",
# Statements
"NewClosure",
"NewClosureFromStubFailure",
"NewObject",
"NewObjectWithAllocationSite",
"FinalizeInstanceSize",
"Throw",
"ReThrow",
"ThrowReferenceError",
"ThrowNotDateError",
"StackGuard",
"Interrupt",
"PromoteScheduledException",
# Contexts
"NewGlobalContext",
"NewFunctionContext",
"PushWithContext",
"PushCatchContext",
"PushBlockContext",
"PushModuleContext",
"DeleteLookupSlot",
"LoadLookupSlot",
"LoadLookupSlotNoReferenceError",
"StoreLookupSlot",
# Declarations
"DeclareGlobals",
"DeclareModules",
"DeclareContextSlot",
"InitializeConstGlobal",
"InitializeConstContextSlot",
# Eval
"ResolvePossiblyDirectEval",
# Maths
"MathPowSlow",
"MathPowRT"
]
# These will always throw.
THROWS = [
"CheckExecutionState", # Needs to hit a break point.
"CheckIsBootstrapping", # Needs to be bootstrapping.
"DebugEvaluate", # Needs to hit a break point.
"DebugEvaluateGlobal", # Needs to hit a break point.
"DebugIndexedInterceptorElementValue", # Needs an indexed interceptor.
"DebugNamedInterceptorPropertyValue", # Needs a named interceptor.
"DebugSetScriptSource", # Checks compilation state of script.
"GetAllScopesDetails", # Needs to hit a break point.
"GetFrameCount", # Needs to hit a break point.
"GetFrameDetails", # Needs to hit a break point.
"GetRootNaN", # Needs to be bootstrapping.
"GetScopeCount", # Needs to hit a break point.
"GetScopeDetails", # Needs to hit a break point.
"GetStepInPositions", # Needs to hit a break point.
"GetTemplateField", # Needs a {Function,Object}TemplateInfo.
"GetThreadCount", # Needs to hit a break point.
"GetThreadDetails", # Needs to hit a break point.
"IsAccessAllowedForObserver", # Needs access-check-required object.
"UnblockConcurrentRecompilation" # Needs --block-concurrent-recompilation.
]
# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below.
_BREAK_ITERATOR = (
"%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())")
_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))"
_DATETIME_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))")
_NUMBER_FORMAT = (
"%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))")
# Custom definitions for function input that does not throw.
# Format: "FunctionName": ["arg0", "arg1", ..., argslength].
# None means "fall back to autodetected value".
CUSTOM_KNOWN_GOOD_INPUT = {
"AddNamedProperty": [None, "\"bla\"", None, None, None],
"AddPropertyForTemplate": [None, 10, None, None, None],
"Apply": ["function() {}", None, None, None, None, None],
"ArrayBufferSliceImpl": [None, None, 0, None],
"ArrayConcat": ["[1, 'a']", None],
"BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None],
"BreakIteratorBreakType": [_BREAK_ITERATOR, None],
"BreakIteratorCurrent": [_BREAK_ITERATOR, None],
"BreakIteratorFirst": [_BREAK_ITERATOR, None],
"BreakIteratorNext": [_BREAK_ITERATOR, None],
"CompileString": [None, "false", None],
"CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None],
"CreateJSFunctionProxy": [None, "function() {}", None, None, None],
"CreatePrivateSymbol": ["\"foo\"", None],
"CreatePrivateOwnSymbol": ["\"foo\"", None],
"CreateSymbol": ["\"foo\"", None],
"DateParseString": [None, "new Array(8)", None],
"DefineAccessorPropertyUnchecked": [None, None, "function() {}",
"function() {}", 2, None],
"FunctionBindArguments": [None, None, "undefined", None, None],
"GetBreakLocations": [None, 0, None],
"GetDefaultReceiver": ["function() {}", None],
"GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None],
"InternalCompare": [_COLLATOR, None, None, None],
"InternalDateFormat": [_DATETIME_FORMAT, None, None],
"InternalDateParse": [_DATETIME_FORMAT, None, None],
"InternalNumberFormat": [_NUMBER_FORMAT, None, None],
"InternalNumberParse": [_NUMBER_FORMAT, None, None],
"IsSloppyModeFunction": ["function() {}", None],
"LoadMutableDouble": ["{foo: 1.2}", None, None],
"NewObjectFromBound": ["(function() {}).bind({})", None],
"NumberToRadixString": [None, "2", None],
"ParseJson": ["\"{}\"", 1],
"RegExpExecMultiple": [None, None, "['a']", "['a']", None],
"DefineApiAccessorProperty": [None, None, "undefined", "undefined", None, None],
"SetIteratorInitialize": [None, None, "2", None],
"SetDebugEventListener": ["undefined", None, None],
"SetFunctionBreakPoint": [None, 218, None, None],
"StringBuilderConcat": ["[1, 2, 3]", 3, None, None],
"StringBuilderJoin": ["['a', 'b']", 4, None, None],
"StringMatch": [None, None, "['a', 'b']", None],
"StringNormalize": [None, 2, None],
"StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None],
"TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None],
"TypedArrayInitializeFromArrayLike": [None, 6, None, None, None],
"TypedArraySetFastCases": [None, None, "0", None],
"FunctionIsArrow": ["() => null", None],
}
# Types of arguments that cannot be generated in a JavaScript testcase.
NON_JS_TYPES = [
"Code", "Context", "FixedArray", "FunctionTemplateInfo",
"JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo",
"SharedFunctionInfo"]
class Generator(object):
def RandomVariable(self, varname, vartype, simple):
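    # Returns a list of JS source lines that define `varname` with a value of
    # the requested type: the canned simple literal when `simple` is true,
    # otherwise a randomly generated (possibly recursive) value.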
if simple:
return self._Variable(varname, self.GENERATORS[vartype][0])
return self.GENERATORS[vartype][1](self, varname,
self.DEFAULT_RECURSION_BUDGET)
@staticmethod
def IsTypeSupported(typename):
return typename in Generator.GENERATORS
USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__",
"prototype", "0", "1", "-1"]
DEFAULT_RECURSION_BUDGET = 2
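  # Handler object passed to the (pre-ES6) Proxy.create / Proxy.createFunction
  # calls below; every trap just returns a trivial value.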
PROXY_TRAPS = """{
getOwnPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getPropertyDescriptor: function(name) {
return {value: function() {}, configurable: true, writable: true,
enumerable: true};
},
getOwnPropertyNames: function() { return []; },
getPropertyNames: function() { return []; },
defineProperty: function(name, descriptor) {},
delete: function(name) { return true; },
fix: function() {}
}"""
def _Variable(self, name, value, fallback=None):
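    # Emits "var <name> = <value>;". If a fallback is given, the assignment is
    # wrapped in try/catch and the fallback value is assigned when evaluating
    # the primary value throws.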
args = { "name": name, "value": value, "fallback": fallback }
if fallback:
wrapper = "try { %%s } catch(e) { var %(name)s = %(fallback)s; }" % args
else:
wrapper = "%s"
return [wrapper % ("var %(name)s = %(value)s;" % args)]
def _Boolean(self, name, recursion_budget):
return self._Variable(name, random.choice(["true", "false"]))
def _Oddball(self, name, recursion_budget):
return self._Variable(name,
random.choice(["true", "false", "undefined", "null"]))
def _StrictMode(self, name, recursion_budget):
return self._Variable(name, random.choice([0, 1]))
def _Int32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff,
0x40000000, -0x40000000, -0x80000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x80000000, 0x7fffffff)
return self._Variable(name, value)
def _Uint32(self, name, recursion_budget=0):
die = random.random()
if die < 0.5:
value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000,
0x7fffffff, 0xffffffff])
elif die < 0.75:
value = random.randint(0, 1000)
else:
value = random.randint(0, 0xffffffff)
return self._Variable(name, value)
def _Smi(self, name, recursion_budget):
die = random.random()
if die < 0.5:
value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000])
elif die < 0.75:
value = random.randint(-1000, 1000)
else:
value = random.randint(-0x40000000, 0x3fffffff)
return self._Variable(name, value)
def _Number(self, name, recursion_budget):
die = random.random()
if die < 0.5:
return self._Smi(name, recursion_budget)
elif die < 0.6:
value = random.choice(["Infinity", "-Infinity", "NaN", "-0",
"1.7976931348623157e+308", # Max value.
"2.2250738585072014e-308", # Min value.
"4.9406564584124654e-324"]) # Min subnormal.
else:
value = random.lognormvariate(0, 15)
return self._Variable(name, value)
def _RawRandomString(self, minlength=0, maxlength=100,
alphabet=string.ascii_letters):
length = random.randint(minlength, maxlength)
result = ""
for i in xrange(length):
result += random.choice(alphabet)
return result
def _SeqString(self, name, recursion_budget):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + 'bar'
return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2))
def _SeqTwoByteString(self, name):
s1 = self._RawRandomString(1, 5)
s2 = self._RawRandomString(1, 5)
# 'foo' + unicode + 'bar'
return self._Variable(name, "\"%s\" + \"\\2082\" + \"%s\"" % (s1, s2))
def _SlicedString(self, name):
s = self._RawRandomString(20, 30)
# 'ffoo12345678901234567890'.substr(1)
return self._Variable(name, "\"%s\".substr(1)" % s)
def _ConsString(self, name):
s1 = self._RawRandomString(8, 15)
s2 = self._RawRandomString(8, 15)
# 'foo12345' + (function() { return 'bar12345';})()
return self._Variable(name,
"\"%s\" + (function() { return \"%s\";})()" % (s1, s2))
def _InternalizedString(self, name):
return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20))
def _String(self, name, recursion_budget):
die = random.random()
if die < 0.5:
string = random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._Variable(name, "\"%s\"" % string)
elif die < 0.6:
number_name = name + "_number"
result = self._Number(number_name, recursion_budget)
return result + self._Variable(name, "\"\" + %s" % number_name)
elif die < 0.7:
return self._SeqString(name, recursion_budget)
elif die < 0.8:
return self._ConsString(name)
elif die < 0.9:
return self._InternalizedString(name)
else:
return self._SlicedString(name)
def _Symbol(self, name, recursion_budget):
raw_string_name = name + "_1"
result = self._String(raw_string_name, recursion_budget)
return result + self._Variable(name, "Symbol(%s)" % raw_string_name)
def _Name(self, name, recursion_budget):
if random.random() < 0.2:
return self._Symbol(name, recursion_budget)
return self._String(name, recursion_budget)
def _JSValue(self, name, recursion_budget):
die = random.random()
raw_name = name + "_1"
if die < 0.33:
result = self._String(raw_name, recursion_budget)
return result + self._Variable(name, "new String(%s)" % raw_name)
elif die < 0.66:
result = self._Boolean(raw_name, recursion_budget)
return result + self._Variable(name, "new Boolean(%s)" % raw_name)
else:
result = self._Number(raw_name, recursion_budget)
return result + self._Variable(name, "new Number(%s)" % raw_name)
def _RawRandomPropertyName(self):
if random.random() < 0.5:
return random.choice(self.USUAL_SUSPECT_PROPERTIES)
return self._RawRandomString(0, 10)
def _AddProperties(self, name, result, recursion_budget):
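    # Appends statements to `result` that assign up to three random properties
    # on `name`, occasionally deleting one again to push the object into slow
    # (dictionary) mode.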
propcount = random.randint(0, 3)
propname = None
for i in range(propcount):
die = random.random()
if die < 0.5:
propname = "%s_prop%d" % (name, i)
result += self._Name(propname, recursion_budget - 1)
else:
propname = "\"%s\"" % self._RawRandomPropertyName()
propvalue_name = "%s_val%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch (e) {}" %
(name, propname, propvalue_name))
if random.random() < 0.2 and propname:
# Force the object to slow mode.
result.append("delete %s[%s];" % (name, propname))
def _RandomElementIndex(self, element_name, result):
if random.random() < 0.5:
return random.randint(-1000, 1000)
result += self._Smi(element_name, 0)
return element_name
def _AddElements(self, name, result, recursion_budget):
elementcount = random.randint(0, 3)
for i in range(elementcount):
element_name = "%s_idx%d" % (name, i)
index = self._RandomElementIndex(element_name, result)
value_name = "%s_elt%d" % (name, i)
result += self._Object(value_name, recursion_budget - 1)
result.append("try { %s[%s] = %s; } catch(e) {}" %
(name, index, value_name))
def _AddAccessors(self, name, result, recursion_budget):
accessorcount = random.randint(0, 3)
for i in range(accessorcount):
propname = self._RawRandomPropertyName()
what = random.choice(["get", "set"])
function_name = "%s_access%d" % (name, i)
result += self._PlainFunction(function_name, recursion_budget - 1)
result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } "
"catch (e) {}" % (name, propname, what, function_name))
def _PlainArray(self, name, recursion_budget):
die = random.random()
if die < 0.5:
literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]",
"['a', 'b', 1, true]"])
return self._Variable(name, literal)
else:
new = random.choice(["", "new "])
length = random.randint(0, 101000)
return self._Variable(name, "%sArray(%d)" % (new, length))
def _PlainObject(self, name, recursion_budget):
die = random.random()
if die < 0.67:
literal_propcount = random.randint(0, 3)
properties = []
result = []
for i in range(literal_propcount):
propname = self._RawRandomPropertyName()
propvalue_name = "%s_lit%d" % (name, i)
result += self._Object(propvalue_name, recursion_budget - 1)
properties.append("\"%s\": %s" % (propname, propvalue_name))
return result + self._Variable(name, "{%s}" % ", ".join(properties))
else:
return self._Variable(name, "new Object()")
def _JSArray(self, name, recursion_budget):
result = self._PlainArray(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _RawRandomBufferLength(self):
if random.random() < 0.2:
return random.choice([0, 1, 8, 0x40000000, 0x80000000])
return random.randint(0, 1000)
def _JSArrayBuffer(self, name, recursion_budget):
length = self._RawRandomBufferLength()
return self._Variable(name, "new ArrayBuffer(%d)" % length)
def _JSDataView(self, name, recursion_budget):
buffer_name = name + "_buffer"
result = self._JSArrayBuffer(buffer_name, recursion_budget)
args = [buffer_name]
die = random.random()
if die < 0.67:
offset = self._RawRandomBufferLength()
args.append("%d" % offset)
if die < 0.33:
length = self._RawRandomBufferLength()
args.append("%d" % length)
result += self._Variable(name, "new DataView(%s)" % ", ".join(args),
fallback="new DataView(new ArrayBuffer(8))")
return result
def _JSDate(self, name, recursion_budget):
die = random.random()
if die < 0.25:
return self._Variable(name, "new Date()")
elif die < 0.5:
ms_name = name + "_ms"
result = self._Number(ms_name, recursion_budget)
return result + self._Variable(name, "new Date(%s)" % ms_name)
elif die < 0.75:
str_name = name + "_str"
month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
"Aug", "Sep", "Oct", "Nov", "Dec"])
day = random.randint(1, 28)
year = random.randint(1900, 2100)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
str_value = ("\"%s %s, %s %s:%s:%s\"" %
(month, day, year, hour, minute, second))
result = self._Variable(str_name, str_value)
return result + self._Variable(name, "new Date(%s)" % str_name)
else:
components = tuple(map(lambda x: "%s_%s" % (name, x),
["y", "m", "d", "h", "min", "s", "ms"]))
return ([j for i in map(self._Int32, components) for j in i] +
self._Variable(name, "new Date(%s)" % ", ".join(components)))
def _PlainFunction(self, name, recursion_budget):
result_name = "result"
body = ["function() {"]
body += self._Object(result_name, recursion_budget - 1)
body.append("return result;\n}")
return self._Variable(name, "%s" % "\n".join(body))
def _JSFunction(self, name, recursion_budget):
result = self._PlainFunction(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSFunctionProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.createFunction(%s, function() {})" %
self.PROXY_TRAPS)
def _JSGeneratorObject(self, name, recursion_budget):
# TODO(jkummerow): Be more creative here?
return self._Variable(name, "(function*() { yield 1; })()")
def _JSMap(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sMap()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
key_name = "%s_k%d" % (name, i)
value_name = "%s_v%d" % (name, i)
if weak:
result += self._JSObject(key_name, recursion_budget - 1)
else:
result += self._Object(key_name, recursion_budget - 1)
result += self._Object(value_name, recursion_budget - 1)
result.append("%s.set(%s, %s)" % (name, key_name, value_name))
return result
def _JSMapIterator(self, name, recursion_budget):
map_name = name + "_map"
result = self._JSMap(map_name, recursion_budget)
iterator_type = random.choice(['keys', 'values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(map_name, iterator_type)))
def _JSProxy(self, name, recursion_budget):
# TODO(jkummerow): Revisit this as the Proxy implementation evolves.
return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS)
def _JSRegExp(self, name, recursion_budget):
flags = random.choice(["", "g", "i", "m", "gi"])
string = "a(b|c)*a" # TODO(jkummerow): Be more creative here?
ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"])
return self._Variable(name, ctor % (string, flags))
def _JSSet(self, name, recursion_budget, weak=""):
result = self._Variable(name, "new %sSet()" % weak)
num_entries = random.randint(0, 3)
for i in range(num_entries):
element_name = "%s_e%d" % (name, i)
if weak:
result += self._JSObject(element_name, recursion_budget - 1)
else:
result += self._Object(element_name, recursion_budget - 1)
result.append("%s.add(%s)" % (name, element_name))
return result
def _JSSetIterator(self, name, recursion_budget):
set_name = name + "_set"
result = self._JSSet(set_name, recursion_budget)
iterator_type = random.choice(['values', 'entries'])
return (result + self._Variable(name, "%s.%s()" %
(set_name, iterator_type)))
def _JSTypedArray(self, name, recursion_budget):
arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16",
"Uint32", "Float32", "Float64", "Uint8Clamped"])
ctor_type = random.randint(0, 3)
if ctor_type == 0:
length = random.randint(0, 1000)
return self._Variable(name, "new %sArray(%d)" % (arraytype, length),
fallback="new %sArray(8)" % arraytype)
elif ctor_type == 1:
input_name = name + "_typedarray"
result = self._JSTypedArray(input_name, recursion_budget - 1)
return (result +
self._Variable(name, "new %sArray(%s)" % (arraytype, input_name),
fallback="new %sArray(8)" % arraytype))
elif ctor_type == 2:
arraylike_name = name + "_arraylike"
result = self._JSObject(arraylike_name, recursion_budget - 1)
length = random.randint(0, 1000)
result.append("try { %s.length = %d; } catch(e) {}" %
(arraylike_name, length))
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, arraylike_name),
fallback="new %sArray(8)" % arraytype))
else:
die = random.random()
buffer_name = name + "_buffer"
args = [buffer_name]
result = self._JSArrayBuffer(buffer_name, recursion_budget)
if die < 0.67:
offset_name = name + "_offset"
args.append(offset_name)
result += self._Int32(offset_name)
if die < 0.33:
length_name = name + "_length"
args.append(length_name)
result += self._Int32(length_name)
return (result +
self._Variable(name,
"new %sArray(%s)" % (arraytype, ", ".join(args)),
fallback="new %sArray(8)" % arraytype))
def _JSArrayBufferView(self, name, recursion_budget):
if random.random() < 0.4:
return self._JSDataView(name, recursion_budget)
else:
return self._JSTypedArray(name, recursion_budget)
def _JSWeakCollection(self, name, recursion_budget):
ctor = random.choice([self._JSMap, self._JSSet])
return ctor(name, recursion_budget, weak="Weak")
def _PropertyDetails(self, name, recursion_budget):
# TODO(jkummerow): Be more clever here?
return self._Int32(name)
def _JSObject(self, name, recursion_budget):
die = random.random()
if die < 0.4:
function = random.choice([self._PlainObject, self._PlainArray,
self._PlainFunction])
elif die < 0.5:
return self._Variable(name, "this") # Global object.
else:
function = random.choice([self._JSArrayBuffer, self._JSDataView,
self._JSDate, self._JSFunctionProxy,
self._JSGeneratorObject, self._JSMap,
self._JSMapIterator, self._JSRegExp,
self._JSSet, self._JSSetIterator,
self._JSTypedArray, self._JSValue,
self._JSWeakCollection])
result = function(name, recursion_budget)
self._AddAccessors(name, result, recursion_budget)
self._AddProperties(name, result, recursion_budget)
self._AddElements(name, result, recursion_budget)
return result
def _JSReceiver(self, name, recursion_budget):
if random.random() < 0.9: return self._JSObject(name, recursion_budget)
return self._JSProxy(name, recursion_budget)
def _HeapObject(self, name, recursion_budget):
die = random.random()
if die < 0.9: return self._JSReceiver(name, recursion_budget)
elif die < 0.95: return self._Oddball(name, recursion_budget)
else: return self._Name(name, recursion_budget)
def _Object(self, name, recursion_budget):
if recursion_budget <= 0:
function = random.choice([self._Oddball, self._Number, self._Name,
self._JSValue, self._JSRegExp])
return function(name, recursion_budget)
if random.random() < 0.2:
return self._Smi(name, recursion_budget)
return self._HeapObject(name, recursion_budget)
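  # Maps each supported argument type to [simple JS literal, generator method].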
GENERATORS = {
"Boolean": ["true", _Boolean],
"HeapObject": ["new Object()", _HeapObject],
"Int32": ["32", _Int32],
"JSArray": ["new Array()", _JSArray],
"JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer],
"JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView],
"JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView],
"JSDate": ["new Date()", _JSDate],
"JSFunction": ["function() {}", _JSFunction],
"JSFunctionProxy": ["Proxy.createFunction({}, function() {})",
_JSFunctionProxy],
"JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject],
"JSMap": ["new Map()", _JSMap],
"JSMapIterator": ["new Map().entries()", _JSMapIterator],
"JSObject": ["new Object()", _JSObject],
"JSProxy": ["Proxy.create({})", _JSProxy],
"JSReceiver": ["new Object()", _JSReceiver],
"JSRegExp": ["/ab/g", _JSRegExp],
"JSSet": ["new Set()", _JSSet],
"JSSetIterator": ["new Set().values()", _JSSetIterator],
"JSTypedArray": ["new Int32Array(2)", _JSTypedArray],
"JSValue": ["new String('foo')", _JSValue],
"JSWeakCollection": ["new WeakMap()", _JSWeakCollection],
"Name": ["\"name\"", _Name],
"Number": ["1.5", _Number],
"Object": ["new Object()", _Object],
"PropertyDetails": ["513", _PropertyDetails],
"SeqOneByteString": ["\"seq 1-byte\"", _SeqString],
"SeqString": ["\"seqstring\"", _SeqString],
"SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString],
"Smi": ["1", _Smi],
"StrictMode": ["1", _StrictMode],
"String": ["\"foo\"", _String],
"Symbol": ["Symbol(\"symbol\")", _Symbol],
"Uint32": ["32", _Uint32],
}
class ArgParser(object):
def __init__(self, regex, ctor):
self.regex = regex
self.ArgCtor = ctor
class Arg(object):
def __init__(self, typename, varname, index):
self.type = typename
self.name = "_%s" % varname
self.index = index
class Function(object):
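  # A runtime function parsed from runtime.cc: its name, DCHECKed argument
  # count, and argument types recovered from the CONVERT_* macros.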
def __init__(self, match):
self.name = match.group(1)
self.argslength = -1
self.args = {}
self.inline = ""
handle_arg_parser = ArgParser(
re.compile("^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
plain_arg_parser = ArgParser(
re.compile("^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"),
lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
number_handle_arg_parser = ArgParser(
re.compile("^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
smi_arg_parser = ArgParser(
re.compile("^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Smi", match.group(1), int(match.group(2))))
double_arg_parser = ArgParser(
re.compile("^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Number", match.group(1), int(match.group(2))))
number_arg_parser = ArgParser(
re.compile(
"^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"),
lambda match: Arg(match.group(2), match.group(1), int(match.group(3))))
strict_mode_arg_parser = ArgParser(
re.compile("^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("StrictMode", match.group(1), int(match.group(2))))
boolean_arg_parser = ArgParser(
re.compile("^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("Boolean", match.group(1), int(match.group(2))))
property_details_parser = ArgParser(
re.compile("^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"),
lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2))))
arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser,
smi_arg_parser,
double_arg_parser, number_arg_parser, strict_mode_arg_parser,
boolean_arg_parser, property_details_parser]
def SetArgsLength(self, match):
self.argslength = int(match.group(1))
def TryParseArg(self, line):
for parser in Function.arg_parsers:
match = parser.regex.match(line)
if match:
arg = parser.ArgCtor(match)
self.args[arg.index] = arg
return True
return False
def Filename(self):
return "%s.js" % self.name.lower()
def __str__(self):
s = [self.name, "("]
argcount = self.argslength
if argcount < 0:
print("WARNING: unknown argslength for function %s" % self.name)
if self.args:
argcount = max([self.args[i].index + 1 for i in self.args])
else:
argcount = 0
for i in range(argcount):
if i > 0: s.append(", ")
s.append(self.args[i].type if i in self.args else "<unknown>")
s.append(")")
return "".join(s)
class Macro(object):
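  # A #define from runtime.cc whose body is expanded textually; Finalize()
  # turns each macro argument into a %(arg)s placeholder so that FillIn() can
  # substitute the call-site values.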
def __init__(self, match):
self.name = match.group(1)
self.args = [s.strip() for s in match.group(2).split(",")]
self.lines = []
self.indentation = 0
self.AddLine(match.group(3))
def AddLine(self, line):
if not line: return
if not self.lines:
# This is the first line, detect indentation.
self.indentation = len(line) - len(line.lstrip())
line = line.rstrip("\\\n ")
if not line: return
assert len(line[:self.indentation].strip()) == 0, \
("expected whitespace: '%s', full line: '%s'" %
(line[:self.indentation], line))
line = line[self.indentation:]
if not line: return
self.lines.append(line + "\n")
def Finalize(self):
for arg in self.args:
pattern = re.compile(r"(##|\b)%s(##|\b)" % arg)
for i in range(len(self.lines)):
self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i])
def FillIn(self, arg_values):
filler = {}
assert len(arg_values) == len(self.args)
for i in range(len(self.args)):
filler[self.args[i]] = arg_values[i]
result = []
for line in self.lines:
result.append(line % filler)
return result
# Parses HEADERFILENAME to find out which runtime functions are "inline".
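# The list is introduced by "#define INLINE_FUNCTION_LIST(F)" and consists of
# entries of the form "F(Name, nargs, nresults) \" continued with backslashes;
# only the function names are collected.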
def FindInlineRuntimeFunctions():
inline_functions = []
with open(HEADERFILENAME, "r") as f:
inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n"
inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?")
mode = "SEARCHING"
for line in f:
if mode == "ACTIVE":
match = inline_function.match(line)
if match:
inline_functions.append(match.group(1))
if not line.endswith("\\\n"):
mode = "SEARCHING"
elif mode == "SEARCHING":
if line == inline_list:
mode = "ACTIVE"
return inline_functions
def ReadFileAndExpandMacros(filename):
found_macros = {}
expanded_lines = []
with open(filename, "r") as f:
found_macro = None
for line in f:
if found_macro is not None:
found_macro.AddLine(line)
if not line.endswith("\\\n"):
found_macro.Finalize()
found_macro = None
continue
match = MACRO.match(line)
if match:
found_macro = Macro(match)
if found_macro.name in EXPAND_MACROS:
found_macros[found_macro.name] = found_macro
else:
found_macro = None
continue
match = FIRST_WORD.match(line)
if match:
first_word = match.group(1)
if first_word in found_macros:
MACRO_CALL = re.compile("%s\(([^)]*)\)" % first_word)
match = MACRO_CALL.match(line)
assert match
args = [s.strip() for s in match.group(1).split(",")]
expanded_lines += found_macros[first_word].FillIn(args)
continue
expanded_lines.append(line)
return expanded_lines
# Detects runtime functions by parsing FILENAME.
def FindRuntimeFunctions():
inline_functions = FindInlineRuntimeFunctions()
functions = []
expanded_lines = ReadFileAndExpandMacros(FILENAME)
function = None
partial_line = ""
for line in expanded_lines:
# Multi-line definition support, ignoring macros.
if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
if line.endswith("\\\n"): continue
partial_line = line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if partial_line.endswith("{"):
line = partial_line
partial_line = ""
else:
continue
match = FUNCTION.match(line)
if match:
function = Function(match)
if function.name in inline_functions:
function.inline = "_"
continue
if function is None: continue
match = ARGSLENGTH.match(line)
if match:
function.SetArgsLength(match)
continue
if function.TryParseArg(line):
continue
if line == FUNCTIONEND:
if function is not None:
functions.append(function)
function = None
return functions
# Hack: This must have the same fields as class Function above, because the
# two are used polymorphically in RunFuzzer(). We could use inheritance...
class Builtin(object):
def __init__(self, match):
self.name = match.group(1)
args = match.group(2)
self.argslength = 0 if args == "" else args.count(",") + 1
self.inline = ""
self.args = {}
if self.argslength > 0:
args = args.split(",")
for i in range(len(args)):
# a = args[i].strip() # TODO: filter out /* comments */ first.
a = ""
self.args[i] = Arg("Object", a, i)
def __str__(self):
return "%s(%d)" % (self.name, self.argslength)
def FindJSBuiltins():
PATH = "src"
fileslist = []
for (root, dirs, files) in os.walk(PATH):
for f in files:
if f.endswith(".js"):
fileslist.append(os.path.join(root, f))
builtins = []
regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
matches = 0
for filename in fileslist:
with open(filename, "r") as f:
file_contents = f.read()
file_contents = js2c.ExpandInlineMacros(file_contents)
lines = file_contents.split("\n")
partial_line = ""
for line in lines:
if line.startswith("function") and not '{' in line:
partial_line += line.rstrip()
continue
if partial_line:
partial_line += " " + line.strip()
if '{' in line:
line = partial_line
partial_line = ""
else:
continue
match = regexp.match(line)
if match:
builtins.append(Builtin(match))
return builtins
# Classifies runtime functions.
def ClassifyFunctions(functions):
# Can be fuzzed with a JavaScript testcase.
js_fuzzable_functions = []
# We have enough information to fuzz these, but they need inputs that
# cannot be created or passed around in JavaScript.
cctest_fuzzable_functions = []
# This script does not have enough information about these.
unknown_functions = []
types = {}
for f in functions:
if f.name in BLACKLISTED:
continue
decision = js_fuzzable_functions
custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None)
if f.argslength < 0:
# Unknown length -> give up unless there's a custom definition.
if custom and custom[-1] is not None:
f.argslength = custom[-1]
assert len(custom) == f.argslength + 1, \
("%s: last custom definition must be argslength" % f.name)
else:
decision = unknown_functions
else:
if custom:
# Any custom definitions must match the known argslength.
assert len(custom) == f.argslength + 1, \
("%s should have %d custom definitions but has %d" %
(f.name, f.argslength + 1, len(custom)))
for i in range(f.argslength):
if custom and custom[i] is not None:
# All good, there's a custom definition.
pass
elif not i in f.args:
# No custom definition and no parse result -> give up.
decision = unknown_functions
else:
t = f.args[i].type
if t in NON_JS_TYPES:
decision = cctest_fuzzable_functions
else:
assert Generator.IsTypeSupported(t), \
("type generator not found for %s, function: %s" % (t, f))
decision.append(f)
return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions)
def _GetKnownGoodArgs(function, generator):
custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None)
definitions = []
argslist = []
for i in range(function.argslength):
if custom_input and custom_input[i] is not None:
name = "arg%d" % i
definitions.append("var %s = %s;" % (name, custom_input[i]))
else:
arg = function.args[i]
name = arg.name
definitions += generator.RandomVariable(name, arg.type, simple=True)
argslist.append(name)
return (definitions, argslist)
def _GenerateTestcase(function, definitions, argslist, throws):
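  # Builds the full testcase source: license header, natives-syntax flags, the
  # variable definitions, and the %FunctionName(...) call, wrapped in try/catch
  # when the function is expected to throw.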
s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
"// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
"// Flags: --allow-natives-syntax --harmony --harmony-proxies"
] + definitions
call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
if throws:
s.append("try {")
    s.append(call)
s.append("} catch(e) {}")
else:
s.append(call)
testcase = "\n".join(s)
return testcase
def GenerateJSTestcaseForFunction(function):
gen = Generator()
(definitions, argslist) = _GetKnownGoodArgs(function, gen)
testcase = _GenerateTestcase(function, definitions, argslist,
function.name in THROWS)
path = os.path.join(BASEPATH, function.Filename())
with open(path, "w") as f:
f.write("%s\n" % testcase)
def GenerateTestcases(functions):
shutil.rmtree(BASEPATH) # Re-generate everything.
os.makedirs(BASEPATH)
for f in functions:
GenerateJSTestcaseForFunction(f)
def _SaveFileName(save_path, process_id, save_file_index):
return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index)
def _GetFuzzableRuntimeFunctions():
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
return js_fuzzable_functions
FUZZ_TARGET_LISTS = {
"runtime": _GetFuzzableRuntimeFunctions,
"builtins": FindJSBuiltins,
}
def RunFuzzer(process_id, options, stop_running):
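  # One fuzzing worker: picks a random target function, generates random
  # arguments for it, runs the testcase in d8 with a timeout, and copies
  # testcases that exit abnormally (excluding OOM aborts and timeouts) to the
  # save path.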
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.001
SLEEP_TIME_FACTOR = 1.25
base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id
test_file_name = "%s.js" % base_file_name
stderr_file_name = "%s.out" % base_file_name
save_file_index = 0
while os.path.exists(_SaveFileName(options.save_path, process_id,
save_file_index)):
save_file_index += 1
targets = FUZZ_TARGET_LISTS[options.fuzz_target]()
try:
for i in range(options.num_tests):
if stop_running.is_set(): break
function = None
while function is None or function.argslength == 0:
function = random.choice(targets)
args = []
definitions = []
gen = Generator()
for i in range(function.argslength):
arg = function.args[i]
argname = "arg%d%s" % (i, arg.name)
args.append(argname)
definitions += gen.RandomVariable(argname, arg.type, simple=False)
testcase = _GenerateTestcase(function, definitions, args, True)
with open(test_file_name, "w") as f:
f.write("%s\n" % testcase)
with open("/dev/null", "w") as devnull:
with open(stderr_file_name, "w") as stderr:
process = subprocess.Popen(
[options.binary, "--allow-natives-syntax", "--harmony",
"--harmony-proxies", "--enable-slow-asserts", test_file_name],
stdout=devnull, stderr=stderr)
end_time = time.time() + options.timeout
timed_out = False
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if time.time() >= end_time:
# Kill the process and wait for it to exit.
os.kill(process.pid, signal.SIGTERM)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
if exit_code != 0 and not timed_out:
oom = False
with open(stderr_file_name, "r") as stderr:
for line in stderr:
if line.strip() == "# Allocation failed - process out of memory":
oom = True
break
if oom: continue
save_name = _SaveFileName(options.save_path, process_id,
save_file_index)
shutil.copyfile(test_file_name, save_name)
save_file_index += 1
except KeyboardInterrupt:
stop_running.set()
finally:
if os.path.exists(test_file_name):
os.remove(test_file_name)
if os.path.exists(stderr_file_name):
os.remove(stderr_file_name)
def BuildOptionParser():
usage = """Usage: %%prog [options] ACTION
where ACTION can be:
info Print diagnostic info.
check Check that runtime functions can be parsed as expected, and that
test cases exist.
generate Parse source code for runtime functions, and auto-generate
test cases for them. Warning: this will nuke and re-create
%(path)s.
fuzz Generate fuzz tests, run them, save those that crashed (see options).
""" % {"path": os.path.relpath(BASEPATH)}
o = optparse.OptionParser(usage=usage)
o.add_option("--binary", default="out/x64.debug/d8",
help="d8 binary used for running fuzz tests (default: %default)")
o.add_option("--fuzz-target", default="runtime",
help="Set of functions targeted by fuzzing. Allowed values: "
"%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS))
o.add_option("-n", "--num-tests", default=1000, type="int",
help="Number of fuzz tests to generate per worker process"
" (default: %default)")
o.add_option("--save-path", default="~/runtime_fuzz_output",
help="Path to directory where failing tests will be stored"
" (default: %default)")
o.add_option("--timeout", default=20, type="int",
help="Timeout for each fuzz test (in seconds, default:"
"%default)")
return o
def ProcessOptions(options, args):
options.save_path = os.path.expanduser(options.save_path)
if options.fuzz_target not in FUZZ_TARGET_LISTS:
print("Invalid fuzz target: %s" % options.fuzz_target)
return False
if len(args) != 1 or args[0] == "help":
return False
return True
def Main():
parser = BuildOptionParser()
(options, args) = parser.parse_args()
if not ProcessOptions(options, args):
parser.print_help()
return 1
action = args[0]
functions = FindRuntimeFunctions()
(js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
ClassifyFunctions(functions)
builtins = FindJSBuiltins()
if action == "test":
print("put your temporary debugging code here")
return 0
if action == "info":
print("%d functions total; js_fuzzable_functions: %d, "
"cctest_fuzzable_functions: %d, unknown_functions: %d"
% (len(functions), len(js_fuzzable_functions),
len(cctest_fuzzable_functions), len(unknown_functions)))
print("%d JavaScript builtins" % len(builtins))
print("unknown functions:")
for f in unknown_functions:
print(f)
return 0
if action == "check":
errors = 0
def CheckCount(actual, expected, description):
if len(actual) != expected:
print("Expected to detect %d %s, but found %d." % (
expected, description, len(actual)))
print("If this change is intentional, please update the expectations"
" at the top of %s." % THIS_SCRIPT)
return 1
return 0
errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT,
"functions in total")
errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT,
"JavaScript-fuzzable functions")
errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT,
"cctest-fuzzable functions")
errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT,
"functions with incomplete type information")
errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT,
"JavaScript builtins")
def CheckTestcasesExisting(functions):
errors = 0
for f in functions:
if not os.path.isfile(os.path.join(BASEPATH, f.Filename())):
print("Missing testcase for %s, please run '%s generate'" %
(f.name, THIS_SCRIPT))
errors += 1
files = filter(lambda filename: not filename.startswith("."),
os.listdir(BASEPATH))
if (len(files) != len(functions)):
unexpected_files = set(files) - set([f.Filename() for f in functions])
for f in unexpected_files:
print("Unexpected testcase: %s" % os.path.join(BASEPATH, f))
errors += 1
print("Run '%s generate' to automatically clean these up."
% THIS_SCRIPT)
return errors
errors += CheckTestcasesExisting(js_fuzzable_functions)
def CheckNameClashes(runtime_functions, builtins):
errors = 0
runtime_map = {}
for f in runtime_functions:
runtime_map[f.name] = 1
for b in builtins:
if b.name in runtime_map:
print("Builtin/Runtime_Function name clash: %s" % b.name)
errors += 1
return errors
errors += CheckNameClashes(functions, builtins)
if errors > 0:
return 1
print("Generated runtime tests: all good.")
return 0
if action == "generate":
GenerateTestcases(js_fuzzable_functions)
return 0
if action == "fuzz":
processes = []
if not os.path.isdir(options.save_path):
os.makedirs(options.save_path)
stop_running = multiprocessing.Event()
for i in range(multiprocessing.cpu_count()):
args = (i, options, stop_running)
p = multiprocessing.Process(target=RunFuzzer, args=args)
p.start()
processes.append(p)
try:
for i in range(len(processes)):
processes[i].join()
except KeyboardInterrupt:
stop_running.set()
for i in range(len(processes)):
processes[i].join()
return 0
if __name__ == "__main__":
sys.exit(Main())
|
|
from django.contrib import admin
from .models import *
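# Each model below gets a ModelAdmin whose list_display exposes every field of
# the model, and is then registered with the default admin site.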
class IndianlegacysectionschannelAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indianlegacysectionschannel._meta.get_fields()]
admin.site.register(Indianlegacysectionschannel, IndianlegacysectionschannelAdmin)
class IndianlegacysectionsconversionErrorsAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionsconversionErrors._meta.get_fields()]
admin.site.register(IndianlegacysectionsconversionErrors, IndianlegacysectionsconversionErrorsAdmin)
class IndianlegacysectionsdbinfoAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indianlegacysectionsdbinfo._meta.get_fields()]
admin.site.register(Indianlegacysectionsdbinfo, IndianlegacysectionsdbinfoAdmin)
class IndianlegacysectionsfieldUnitsAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionsfieldUnits._meta.get_fields()]
admin.site.register(IndianlegacysectionsfieldUnits, IndianlegacysectionsfieldUnitsAdmin)
class IndianlegacysectionsiShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionsiShape._meta.get_fields()]
admin.site.register(IndianlegacysectionsiShape, IndianlegacysectionsiShapeAdmin)
class IndianlegacysectionsmShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionsmShape._meta.get_fields()]
admin.site.register(IndianlegacysectionsmShape, IndianlegacysectionsmShapeAdmin)
class IndianlegacysectionspipeAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indianlegacysectionspipe._meta.get_fields()]
admin.site.register(Indianlegacysectionspipe, IndianlegacysectionspipeAdmin)
class IndianlegacysectionssShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionssShape._meta.get_fields()]
admin.site.register(IndianlegacysectionssShape, IndianlegacysectionssShapeAdmin)
class IndianlegacysectionstShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionstShape._meta.get_fields()]
admin.site.register(IndianlegacysectionstShape, IndianlegacysectionstShapeAdmin)
class IndianlegacysectionstubeAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indianlegacysectionstube._meta.get_fields()]
admin.site.register(Indianlegacysectionstube, IndianlegacysectionstubeAdmin)
class IndianlegacysectionswShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndianlegacysectionswShape._meta.get_fields()]
admin.site.register(IndianlegacysectionswShape, IndianlegacysectionswShapeAdmin)
class IndiansectionsangleAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indiansectionsangle._meta.get_fields()]
admin.site.register(Indiansectionsangle, IndiansectionsangleAdmin)
class IndiansectionschannelAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indiansectionschannel._meta.get_fields()]
admin.site.register(Indiansectionschannel, IndiansectionschannelAdmin)
class IndiansectionsdbinfoAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indiansectionsdbinfo._meta.get_fields()]
admin.site.register(Indiansectionsdbinfo, IndiansectionsdbinfoAdmin)
class IndiansectionsfieldUnitsAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndiansectionsfieldUnits._meta.get_fields()]
admin.site.register(IndiansectionsfieldUnits, IndiansectionsfieldUnitsAdmin)
class IndiansectionsiShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndiansectionsiShape._meta.get_fields()]
admin.site.register(IndiansectionsiShape, IndiansectionsiShapeAdmin)
class IndiansectionsmShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndiansectionsmShape._meta.get_fields()]
admin.site.register(IndiansectionsmShape, IndiansectionsmShapeAdmin)
class IndiansectionspipeAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indiansectionspipe._meta.get_fields()]
admin.site.register(Indiansectionspipe, IndiansectionspipeAdmin)
class IndiansectionssShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndiansectionssShape._meta.get_fields()]
admin.site.register(IndiansectionssShape, IndiansectionssShapeAdmin)
class IndiansectionstShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndiansectionstShape._meta.get_fields()]
admin.site.register(IndiansectionstShape, IndiansectionstShapeAdmin)
class IndiansectionstubeAdmin(admin.ModelAdmin):
list_display = [f.name for f in Indiansectionstube._meta.get_fields()]
admin.site.register(Indiansectionstube, IndiansectionstubeAdmin)
class IndiansectionswShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in IndiansectionswShape._meta.get_fields()]
admin.site.register(IndiansectionswShape, IndiansectionswShapeAdmin)
class JindalsectionsdbinfoAdmin(admin.ModelAdmin):
list_display = [f.name for f in Jindalsectionsdbinfo._meta.get_fields()]
admin.site.register(Jindalsectionsdbinfo, JindalsectionsdbinfoAdmin)
class JindalsectionsfieldUnitsAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsfieldUnits._meta.get_fields()]
admin.site.register(JindalsectionsfieldUnits, JindalsectionsfieldUnitsAdmin)
class JindalsectionsheShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsheShape._meta.get_fields()]
admin.site.register(JindalsectionsheShape, JindalsectionsheShapeAdmin)
class JindalsectionsipeShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsipeShape._meta.get_fields()]
admin.site.register(JindalsectionsipeShape, JindalsectionsipeShapeAdmin)
class JindalsectionsismcShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsismcShape._meta.get_fields()]
admin.site.register(JindalsectionsismcShape, JindalsectionsismcShapeAdmin)
class JindalsectionsnpbShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsnpbShape._meta.get_fields()]
admin.site.register(JindalsectionsnpbShape, JindalsectionsnpbShapeAdmin)
class JindalsectionsubShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsubShape._meta.get_fields()]
admin.site.register(JindalsectionsubShape, JindalsectionsubShapeAdmin)
class JindalsectionsucShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionsucShape._meta.get_fields()]
admin.site.register(JindalsectionsucShape, JindalsectionsucShapeAdmin)
class JindalsectionswpbShapeAdmin(admin.ModelAdmin):
list_display = [f.name for f in JindalsectionswpbShape._meta.get_fields()]
admin.site.register(JindalsectionswpbShape, JindalsectionswpbShapeAdmin)
class JobAdmin(admin.ModelAdmin):
list_display = [f.name for f in Job._meta.get_fields()]
admin.site.register(Job, JobAdmin)
class JobMaterialAdmin(admin.ModelAdmin):
list_display = [f.name for f in JobMaterial._meta.get_fields()]
admin.site.register(JobMaterial, JobMaterialAdmin)
class JointAdmin(admin.ModelAdmin):
list_display = [f.name for f in Joint._meta.get_fields()]
admin.site.register(Joint, JointAdmin)
class MemberAdmin(admin.ModelAdmin):
list_display = [f.name for f in Member._meta.get_fields()]
admin.site.register(Member, MemberAdmin)
class MemberIncidenceAdmin(admin.ModelAdmin):
list_display = [f.name for f in MemberIncidence._meta.get_fields()]
admin.site.register(MemberIncidence, MemberIncidenceAdmin)
class MemberPropertyAdmin(admin.ModelAdmin):
list_display = [f.name for f in MemberProperty._meta.get_fields()]
admin.site.register(MemberProperty, MemberPropertyAdmin)
class TatastructuressectionschsAdmin(admin.ModelAdmin):
list_display = [f.name for f in Tatastructuressectionschs._meta.get_fields()]
admin.site.register(Tatastructuressectionschs, TatastructuressectionschsAdmin)
class TatastructuressectionsdbinfoAdmin(admin.ModelAdmin):
list_display = [f.name for f in Tatastructuressectionsdbinfo._meta.get_fields()]
admin.site.register(Tatastructuressectionsdbinfo, TatastructuressectionsdbinfoAdmin)
class TatastructuressectionsfieldUnitsAdmin(admin.ModelAdmin):
list_display = [f.name for f in TatastructuressectionsfieldUnits._meta.get_fields()]
admin.site.register(TatastructuressectionsfieldUnits, TatastructuressectionsfieldUnitsAdmin)
class TatastructuressectionsrhsAdmin(admin.ModelAdmin):
list_display = [f.name for f in Tatastructuressectionsrhs._meta.get_fields()]
admin.site.register(Tatastructuressectionsrhs, TatastructuressectionsrhsAdmin)
class TatastructuressectionsshsAdmin(admin.ModelAdmin):
list_display = [f.name for f in Tatastructuressectionsshs._meta.get_fields()]
admin.site.register(Tatastructuressectionsshs, TatastructuressectionsshsAdmin)
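# Note on the pattern above: Model._meta.get_fields() also returns reverse-relation objects
# (e.g. ManyToOneRel), which ModelAdmin.list_display rejects on newer Django versions. The helper
# below is only a sketch of a safer variant -- it is not used by the registrations above -- that
# keeps concrete, non-many-to-many fields only.
def concrete_field_names(model):
    """Return the names of plain columns, skipping reverse relations and M2M accessors."""
    return [f.name for f in model._meta.get_fields()
            if getattr(f, 'concrete', False) and not f.many_to_many]
# Hypothetical usage: list_display = concrete_field_names(Job)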
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for build stages."""
from __future__ import print_function
import mock
from chromite.cbuildbot import cbuildbot_unittest
from chromite.cbuildbot.stages import artifact_stages
from chromite.cbuildbot.stages import generic_stages_unittest
from chromite.cbuildbot.stages import release_stages
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import results_lib
from chromite.lib import timeout_util
from chromite.cbuildbot.stages.generic_stages_unittest import patch
from chromite.lib.paygen import gspaths
from chromite.lib.paygen import paygen_build_lib
# pylint: disable=protected-access
class PaygenStageTest(generic_stages_unittest.AbstractStageTestCase,
cbuildbot_unittest.SimpleBuilderTestCase):
"""Test the PaygenStageStage."""
BOT_ID = 'x86-mario-release'
RELEASE_TAG = '0.0.1'
SIGNER_RESULT = """
{ "status": { "status": "passed" }, "board": "link",
"keyset": "link-mp-v4", "type": "recovery", "channel": "stable" }
"""
INSNS_URLS_PER_CHANNEL = {
'chan1': ['chan1_uri1', 'chan1_uri2'],
'chan2': ['chan2_uri1'],
}
def setUp(self):
self._Prepare()
def ConstructStage(self):
archive_stage = artifact_stages.ArchiveStage(self._run, self._current_board)
return release_stages.PaygenStage(self._run, self._current_board,
archive_stage)
def testWaitForPushImageSuccess(self):
"""Test waiting for input from PushImage."""
stage = self.ConstructStage()
stage.board_runattrs.SetParallel(
'instruction_urls_per_channel', self.INSNS_URLS_PER_CHANNEL)
self.assertEqual(stage._WaitForPushImage(), self.INSNS_URLS_PER_CHANNEL)
def testWaitForPushImageError(self):
"""Test WaitForPushImageError with an error output from pushimage."""
stage = self.ConstructStage()
stage.board_runattrs.SetParallel(
'instruction_urls_per_channel', None)
self.assertRaises(release_stages.MissingInstructionException,
stage._WaitForPushImage)
def testWaitForSigningResultsSuccess(self):
"""Test that _WaitForSigningResults works when signing works."""
results = ['chan1_uri1.json', 'chan1_uri2.json', 'chan2_uri1.json']
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.return_value = self.SIGNER_RESULT
notifier = mock.Mock()
stage = self.ConstructStage()
stage._WaitForSigningResults(self.INSNS_URLS_PER_CHANNEL, notifier)
self.assertEqual(notifier.mock_calls,
[mock.call('chan1'),
mock.call('chan2')])
for result in results:
mock_gs_ctx.Cat.assert_any_call(result)
def testWaitForSigningResultsSuccessNothingSigned(self):
"""Test _WaitForSigningResults when there are no signed images."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.return_value = self.SIGNER_RESULT
notifier = mock.Mock()
stage = self.ConstructStage()
stage._WaitForSigningResults({}, notifier)
self.assertEqual(notifier.mock_calls, [])
self.assertEqual(mock_gs_ctx.Cat.mock_calls, [])
def testWaitForSigningResultsFailure(self):
"""Test _WaitForSigningResults when the signers report an error."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.return_value = """
{ "status": { "status": "failed" }, "board": "link",
"keyset": "link-mp-v4", "type": "recovery", "channel": "stable" }
"""
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertRaisesStringifyable(
release_stages.SignerFailure,
stage._WaitForSigningResults,
{'chan1': ['chan1_uri1']}, notifier)
# Ensure we didn't notify anyone of success.
self.assertEqual(notifier.mock_calls, [])
self.assertEqual(mock_gs_ctx.Cat.mock_calls,
[mock.call('chan1_uri1.json')])
def testWaitForSigningResultsTimeout(self):
"""Test that _WaitForSigningResults reports timeouts correctly."""
with patch(release_stages.timeout_util, 'WaitForSuccess') as mock_wait:
mock_wait.side_effect = timeout_util.TimeoutError
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertRaises(release_stages.SignerResultsTimeout,
stage._WaitForSigningResults,
{'chan1': ['chan1_uri1']}, notifier)
self.assertEqual(notifier.mock_calls, [])
def testCheckForResultsSuccess(self):
"""Test that _CheckForResults works when signing works."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.return_value = self.SIGNER_RESULT
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertTrue(
stage._CheckForResults(mock_gs_ctx,
self.INSNS_URLS_PER_CHANNEL,
notifier))
self.assertEqual(notifier.mock_calls,
[mock.call('chan1'), mock.call('chan2')])
def testCheckForResultsSuccessNoChannels(self):
"""Test that _CheckForResults works when there is nothing to check for."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
notifier = mock.Mock()
stage = self.ConstructStage()
# Ensure we find that we are ready if there are no channels to look for.
self.assertTrue(stage._CheckForResults(mock_gs_ctx, {}, notifier))
# Ensure we didn't contact GS while checking for no channels.
self.assertFalse(mock_gs_ctx.Cat.called)
self.assertEqual(notifier.mock_calls, [])
def testCheckForResultsPartialComplete(self):
"""Verify _CheckForResults handles partial signing results."""
def catChan2Success(url):
if url.startswith('chan2'):
return self.SIGNER_RESULT
else:
raise release_stages.gs.GSNoSuchKey()
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.side_effect = catChan2Success
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertFalse(
stage._CheckForResults(mock_gs_ctx,
self.INSNS_URLS_PER_CHANNEL,
notifier))
self.assertEqual(stage.signing_results, {
'chan1': {},
'chan2': {
'chan2_uri1.json': {
'board': 'link',
'channel': 'stable',
'keyset': 'link-mp-v4',
'status': {'status': 'passed'},
'type': 'recovery'
}
}
})
self.assertEqual(notifier.mock_calls, [mock.call('chan2')])
def testCheckForResultsUnexpectedJson(self):
"""Verify _CheckForResults handles unexpected Json values."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.return_value = '{}'
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertFalse(
stage._CheckForResults(mock_gs_ctx,
self.INSNS_URLS_PER_CHANNEL,
notifier))
self.assertEqual(stage.signing_results, {
'chan1': {}, 'chan2': {}
})
self.assertEqual(notifier.mock_calls, [])
def testCheckForResultsMalformedJson(self):
"""Verify _CheckForResults handles unexpected Json values."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.return_value = '{'
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertFalse(
stage._CheckForResults(mock_gs_ctx,
self.INSNS_URLS_PER_CHANNEL,
notifier))
self.assertEqual(stage.signing_results, {
'chan1': {}, 'chan2': {}
})
self.assertEqual(notifier.mock_calls, [])
def testCheckForResultsNoResult(self):
"""Verify _CheckForResults handles missing signer results."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.side_effect = release_stages.gs.GSNoSuchKey
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertFalse(
stage._CheckForResults(mock_gs_ctx,
self.INSNS_URLS_PER_CHANNEL,
notifier))
self.assertEqual(stage.signing_results, {
'chan1': {}, 'chan2': {}
})
self.assertEqual(notifier.mock_calls, [])
def testCheckForResultsFailed(self):
"""Verify _CheckForResults handles missing signer results."""
with patch(release_stages.gs, 'GSContext') as mock_gs_ctx_init:
mock_gs_ctx = mock_gs_ctx_init.return_value
mock_gs_ctx.Cat.side_effect = release_stages.gs.GSNoSuchKey
notifier = mock.Mock()
stage = self.ConstructStage()
self.assertFalse(
stage._CheckForResults(mock_gs_ctx,
self.INSNS_URLS_PER_CHANNEL,
notifier))
self.assertEqual(stage.signing_results, {
'chan1': {}, 'chan2': {}
})
self.assertEqual(notifier.mock_calls, [])
def generateNotifyCalls(self, channels):
def side_effect(_, notifier):
for channel in channels:
notifier(channel)
return side_effect
def testPerformStageSuccess(self):
"""Test that PaygenStage works when signing works."""
with patch(release_stages.parallel, 'BackgroundTaskRunner') as background:
queue = background().__enter__()
# This patch is only required for external builds with no config data.
with patch(paygen_build_lib, 'ValidateBoardConfig'):
stage = self.ConstructStage()
with patch(stage, '_WaitForPushImage') as wait_push:
with patch(stage, '_WaitForSigningResults') as wait_signing:
wait_push.return_value = self.INSNS_URLS_PER_CHANNEL
wait_signing.side_effect = self.generateNotifyCalls(('stable',
'beta'))
stage.PerformStage()
# Verify that we queue up work
self.assertEqual(
queue.put.call_args_list,
[mock.call(('stable', 'x86-mario', '0.0.1', False, False, False)),
mock.call(('beta', 'x86-mario', '0.0.1', False, False, False))])
def testPerformStageSuccessVariantBoard(self):
"""Test that PaygenStage works with variant boards.
Variant boards need some name conversion. Make sure that's okay.
"""
self._current_board = 'x86-alex_he'
with patch(release_stages.parallel, 'BackgroundTaskRunner') as background:
queue = background().__enter__()
# This patch is only required for external builds with no config data.
with patch(paygen_build_lib, 'ValidateBoardConfig'):
stage = self.ConstructStage()
with patch(stage, '_WaitForPushImage') as wait_push:
with patch(stage, '_WaitForSigningResults') as wait_signing:
wait_push.return_value = self.INSNS_URLS_PER_CHANNEL
wait_signing.side_effect = self.generateNotifyCalls(('stable',
'beta'))
stage.PerformStage()
# Verify that we queue up work
self.assertEqual(
queue.put.call_args_list,
[mock.call(('stable', 'x86-alex-he', '0.0.1', False, False, False)),
mock.call(('beta', 'x86-alex-he', '0.0.1', False, False, False))])
def testPerformStageSigningFailed(self):
"""Test that PaygenStage works when signing works."""
with patch(release_stages.parallel, 'BackgroundTaskRunner') as background:
queue = background().__enter__()
# This patch is only required for external builds with no config data.
with patch(paygen_build_lib, 'ValidateBoardConfig'):
stage = self.ConstructStage()
with patch(stage, '_WaitForPushImage') as wait_push:
with patch(stage, '_WaitForSigningResults') as wait_signing:
wait_push.return_value = self.INSNS_URLS_PER_CHANNEL
wait_signing.side_effect = release_stages.SignerFailure
self.assertRaises(release_stages.SignerFailure,
stage.PerformStage)
# Ensure no work was queued up.
self.assertFalse(queue.put.called)
def testPerformStageBackgroundFail(self):
"""Test that exception from background processes are properly handled."""
with patch(paygen_build_lib, 'CreatePayloads') as create_payloads:
create_payloads.side_effect = failures_lib.TestLabFailure
# This patch is only required for external builds with no config data.
with patch(paygen_build_lib, 'ValidateBoardConfig'):
stage = release_stages.PaygenStage(
self._run, self._current_board,
archive_stage=None, channels=['foo', 'bar'])
with patch(stage, '_HandleExceptionAsWarning') as warning_handler:
warning_handler.return_value = (results_lib.Results.FORGIVEN,
'description',
0)
stage.Run()
# This proves the exception was turned into a warning.
self.assertTrue(warning_handler.called)
def testPerformStageTrybot(self):
"""Test the PerformStage alternate behavior for trybot runs."""
with patch(release_stages.parallel, 'BackgroundTaskRunner') as background:
queue = background().__enter__()
# This patch is only required for external builds with no config data.
with patch(paygen_build_lib, 'ValidateBoardConfig'):
# The stage is constructed differently for trybots, so don't use
# ConstructStage.
stage = release_stages.PaygenStage(
self._run, self._current_board, archive_stage=None,
channels=['foo', 'bar'])
with patch(stage, '_WaitForPushImage') as wait_push:
with patch(stage, '_WaitForSigningResults') as wait_signing:
stage.PerformStage()
# Make sure we don't wait on push_image or signing in this case.
self.assertEqual(wait_push.mock_calls, [])
self.assertEqual(wait_signing.mock_calls, [])
# Notice that we didn't put anything in _wait_for_channel_signing, but
# still got results right away.
self.assertEqual(
queue.put.call_args_list,
[mock.call(('foo', 'x86-mario', '0.0.1', False, False, False)),
mock.call(('bar', 'x86-mario', '0.0.1', False, False, False))])
def testPerformStageUnknownBoard(self):
"""Test that PaygenStage exits when an unknown board is specified."""
self._current_board = 'unknown-board-name'
badBoardException = paygen_build_lib.BoardNotConfigured(self._current_board)
# This patch is only required for external builds with no config data.
with patch(paygen_build_lib, 'ValidateBoardConfig') as validate_boards:
validate_boards.side_effect = badBoardException
stage = self.ConstructStage()
self.assertRaises(release_stages.PaygenNoPaygenConfigForBoard,
stage.PerformStage)
def testRunPaygenInProcess(self):
"""Test that _RunPaygenInProcess works in the simple case."""
with patch(paygen_build_lib, 'CreatePayloads') as create_payloads:
# Call the method under test.
stage = self.ConstructStage()
stage._RunPaygenInProcess('foo', 'foo-board', 'foo-version',
False, False, False)
# Ensure arguments are properly converted and passed along.
create_payloads.assert_called_with(gspaths.Build(version='foo-version',
board='foo-board',
channel='foo-channel'),
dry_run=False,
work_dir=mock.ANY,
run_parallel=True,
run_on_builder=True,
skip_delta_payloads=False,
disable_tests=False)
def testRunPaygenInProcessComplex(self):
"""Test that _RunPaygenInProcess with arguments that are more unusual."""
with patch(paygen_build_lib, 'CreatePayloads') as create_payloads:
# Call the method under test.
# Use release tools channel naming, and a board name including a variant.
stage = self.ConstructStage()
stage._RunPaygenInProcess('foo-channel', 'foo-board-variant',
'foo-version', True, True, True)
# Ensure arguments are properly converted and passed along.
create_payloads.assert_called_with(
gspaths.Build(version='foo-version',
board='foo-board-variant',
channel='foo-channel'),
dry_run=True,
work_dir=mock.ANY,
run_parallel=True,
run_on_builder=True,
skip_delta_payloads=True,
disable_tests=True)
|
|
from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import Organization, OrganizationStatus, User, UserOption
from sentry.testutils import APITestCase
class UserDetailsTest(APITestCase):
# TODO(dcramer): there's currently no way to look up other users
# def test_simple(self):
# user = self.create_user(email='a@example.com')
# user2 = self.create_user(email='b@example.com')
# self.login_as(user=user)
# url = reverse('sentry-api-0-user-details', kwargs={
# 'user_id': user2.id,
# })
# resp = self.client.get(url, format='json')
# assert resp.status_code == 200, resp.content
# assert resp.data['id'] == six.text_type(user.id)
# assert 'identities' not in resp.data
def test_lookup_self(self):
user = self.create_user(email='a@example.com')
self.login_as(user=user)
url = reverse(
'sentry-api-0-user-details', kwargs={
'user_id': 'me',
}
)
resp = self.client.get(url, format='json')
assert resp.status_code == 200, resp.content
assert resp.data['id'] == six.text_type(user.id)
assert resp.data['options']['timezone'] == 'UTC'
assert resp.data['options']['language'] == 'en'
assert resp.data['options']['stacktraceOrder'] == -1
assert not resp.data['options']['clock24Hours']
def test_superuser(self):
user = self.create_user(email='a@example.com')
superuser = self.create_user(email='b@example.com', is_superuser=True)
self.login_as(user=superuser, superuser=True)
url = reverse(
'sentry-api-0-user-details', kwargs={
'user_id': user.id,
}
)
resp = self.client.get(url)
assert resp.status_code == 200, resp.content
assert resp.data['id'] == six.text_type(user.id)
assert 'identities' in resp.data
assert len(resp.data['identities']) == 0
class UserUpdateTest(APITestCase):
def setUp(self):
self.user = self.create_user(email='a@example.com', is_managed=False, name='example name')
self.login_as(user=self.user)
self.url = reverse(
'sentry-api-0-user-details', kwargs={
'user_id': 'me',
}
)
def test_simple(self):
resp = self.client.put(
self.url,
data={
'name': 'hello world',
'options': {
'timezone': 'UTC',
'stacktraceOrder': '2',
'language': 'fr',
'clock24Hours': True,
'extra': True,
'seenReleaseBroadcast': True,
}
}
)
assert resp.status_code == 200, resp.content
assert resp.data['id'] == six.text_type(self.user.id)
user = User.objects.get(id=self.user.id)
assert user.name == 'hello world'
# note: email should not change, removed support for email changing from this endpoint
assert user.email == 'a@example.com'
assert user.username == 'a@example.com'
assert UserOption.objects.get_value(
user=user,
key='seen_release_broadcast',
) is True
assert UserOption.objects.get_value(user=self.user, key='timezone') == 'UTC'
assert UserOption.objects.get_value(user=self.user, key='stacktrace_order') == '2'
assert UserOption.objects.get_value(user=self.user, key='language') == 'fr'
assert UserOption.objects.get_value(user=self.user, key='clock_24_hours')
assert not UserOption.objects.get_value(user=self.user, key='extra')
def test_superuser(self):
# superuser should be able to change self.user's name
superuser = self.create_user(email='b@example.com', is_superuser=True)
self.login_as(user=superuser, superuser=True)
url = reverse(
'sentry-api-0-user-details', kwargs={
'user_id': self.user.id,
}
)
resp = self.client.put(
url,
data={
'name': 'hello world',
'email': 'c@example.com',
'isActive': 'false',
}
)
assert resp.status_code == 200, resp.content
assert resp.data['id'] == six.text_type(self.user.id)
user = User.objects.get(id=self.user.id)
assert user.name == 'hello world'
# note: email should not change, removed support for email changing from this endpoint
assert user.email == 'a@example.com'
assert user.username == 'a@example.com'
assert not user.is_active
def test_managed_fields(self):
assert self.user.name == 'example name'
with self.settings(SENTRY_MANAGED_USER_FIELDS=('name', )):
resp = self.client.put(
self.url,
data={
'name': 'new name',
}
)
assert resp.status_code == 200
# name remains unchanged
user = User.objects.get(id=self.user.id)
assert user.name == 'example name'
def test_change_username_when_different(self):
# if email != username and we change username, only username should change
user = self.create_user(email="c@example.com", username="diff@example.com")
self.login_as(user=user, superuser=False)
resp = self.client.put(
self.url,
data={
'username': 'new@example.com',
}
)
assert resp.status_code == 200, resp.content
user = User.objects.get(id=user.id)
assert user.email == 'c@example.com'
assert user.username == 'new@example.com'
def test_change_username_when_same(self):
# if email == username and we change username,
# keep email in sync
user = self.create_user(email="c@example.com", username="c@example.com")
self.login_as(user=user)
resp = self.client.put(
self.url,
data={
'username': 'new@example.com',
}
)
assert resp.status_code == 200, resp.content
user = User.objects.get(id=user.id)
assert user.email == 'new@example.com'
assert user.username == 'new@example.com'
def test_close_account(self):
self.login_as(user=self.user)
org_single_owner = self.create_organization(name="A", owner=self.user)
user2 = self.create_user(email="user2@example.com")
org_with_other_owner = self.create_organization(name="B", owner=self.user)
org_as_other_owner = self.create_organization(name="C", owner=user2)
not_owned_org = self.create_organization(name="D", owner=user2)
self.create_member(
user=user2,
organization=org_with_other_owner,
role='owner',
)
self.create_member(
user=self.user,
organization=org_as_other_owner,
role='owner',
)
url = reverse(
'sentry-api-0-user-details', kwargs={
'user_id': self.user.id,
}
)
# test validations
response = self.client.delete(url, data={
})
assert response.status_code == 400
response = self.client.delete(url, data={
'organizations': None
})
assert response.status_code == 400
# test actual delete
response = self.client.delete(url, data={
'organizations': [org_with_other_owner.slug, org_as_other_owner.slug, not_owned_org.slug]
})
# deletes org_single_owner even though it wasn't specified in array
# because it has a single owner
assert Organization.objects.get(
id=org_single_owner.id).status == OrganizationStatus.PENDING_DELETION
# should delete org_with_other_owner, and org_as_other_owner
assert Organization.objects.get(
id=org_with_other_owner.id).status == OrganizationStatus.PENDING_DELETION
assert Organization.objects.get(
id=org_as_other_owner.id).status == OrganizationStatus.PENDING_DELETION
# should NOT delete `not_owned_org`
assert Organization.objects.get(id=not_owned_org.id).status == OrganizationStatus.ACTIVE
assert response.status_code == 204
def test_close_account_no_orgs(self):
self.login_as(user=self.user)
org_single_owner = self.create_organization(name="A", owner=self.user)
user2 = self.create_user(email="user2@example.com")
org_with_other_owner = self.create_organization(name="B", owner=self.user)
org_as_other_owner = self.create_organization(name="C", owner=user2)
not_owned_org = self.create_organization(name="D", owner=user2)
self.create_member(
user=user2,
organization=org_with_other_owner,
role='owner',
)
self.create_member(
user=self.user,
organization=org_as_other_owner,
role='owner',
)
url = reverse(
'sentry-api-0-user-details', kwargs={
'user_id': self.user.id,
}
)
response = self.client.delete(url, data={
'organizations': []
})
assert response.status_code == 204
# deletes org_single_owner even though it wasn't specified in array
# because it has a single owner
assert Organization.objects.get(
id=org_single_owner.id).status == OrganizationStatus.PENDING_DELETION
# should NOT delete `not_owned_org`
assert Organization.objects.get(id=not_owned_org.id).status == OrganizationStatus.ACTIVE
|
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Dan Wendlandt, Nicira, Inc.
import unittest
import uuid
import mox
from akanda.rug.common.linux import ovs_lib, utils
def generate_uuid():
return str(uuid.uuid4())
class OVS_Lib_Test(unittest.TestCase):
"""
A test suite to exercise the OVS libraries shared by Quantum agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=2"
self.mox = mox.Mox()
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.mox.StubOutWithMock(utils, "execute")
self.addCleanup(self.mox.UnsetStubs)
def test_vifport(self):
"""create and stringify vif port, confirm no exceptions"""
self.mox.ReplayAll()
pname = "vif1.0"
ofport = 5
vif_id = generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
foo = str(port)
self.assertTrue(foo)
self.mox.VerifyAll()
def test_reset_bridge(self):
utils.execute(["ovs-vsctl", self.TO, "--",
"--if-exists", "del-br", self.BR_NAME],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME],
root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.reset_bridge()
self.mox.VerifyAll()
def test_delete_port(self):
pname = "tap5"
utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.delete_port(pname)
self.mox.VerifyAll()
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,in_port=%s,dl_vlan=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (ofport, vid, lsw_id)],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef",
actions="strip_vlan,output:0")
self.br.add_flow(priority=1, actions="normal")
self.br.add_flow(priority=2, actions="drop")
self.br.add_flow(priority=2, in_port=ofport, actions="drop")
self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid,
actions="strip_vlan,set_tunnel:%s,normal" %
(lsw_id))
self.br.add_flow(priority=3, tun_id=lsw_id,
actions="mod_vlan_vid:%s,output:%s" %
(vid, ofport))
self.mox.VerifyAll()
def test_get_port_ofport(self):
pname = "tap99"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.get_port_ofport(pname), ofport)
self.mox.VerifyAll()
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
utils.execute(["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper).AndReturn(datapath_id)
self.mox.ReplayAll()
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.mox.VerifyAll()
def test_count_flows(self):
utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper).AndReturn('ignore'
'\nflow-1\n')
self.mox.ReplayAll()
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.mox.VerifyAll()
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport], root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id], root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
self.mox.VerifyAll()
def test_add_tunnel_port(self):
pname = "tap99"
ip = "9.9.9.9"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "add-port",
self.BR_NAME, pname], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=gre"], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:remote_ip=" + ip],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:in_key=flow"],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:out_key=flow"],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.add_tunnel_port(pname, ip), ofport)
self.mox.VerifyAll()
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "add-port",
self.BR_NAME, pname], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=patch"], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set",
"Interface", pname, "options:peer=" + peer],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
self.mox.VerifyAll()
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = generate_uuid()
mac = "ca:fe:de:ad:be:ef"
utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper).AndReturn("%s\n" % pname)
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper).AndReturn(external_ids)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
if is_xen:
utils.execute(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper).AndReturn(vif_id)
self.mox.ReplayAll()
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
self.mox.VerifyAll()
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(True)
def test_clear_db_attribute(self):
pname = "tap77"
utils.execute(["ovs-vsctl", self.TO, "clear", "Port",
pname, "tag"], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.clear_db_attribute("Port", pname, "tag")
self.mox.VerifyAll()
def test_port_id_regex(self):
result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",'
' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",'
' iface-status=active}\nname :'
' "dhc5c1321a7-c7"\nofport : 2\n')
match = self.br.re_id.search(result)
vif_mac = match.group('vif_mac')
vif_id = match.group('vif_id')
port_name = match.group('port_name')
ofport = int(match.group('ofport'))
self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2')
self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f')
self.assertEqual(port_name, 'dhc5c1321a7-c7')
self.assertEqual(ofport, 2)
def test_iface_to_br(self):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper).AndReturn('br-int')
self.mox.ReplayAll()
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.mox.VerifyAll()
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper).AndRaise(Exception)
self.mox.ReplayAll()
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.mox.VerifyAll()
def test_delete_all_ports(self):
self.mox.StubOutWithMock(self.br, 'get_port_name_list')
self.br.get_port_name_list().AndReturn(['port1'])
self.mox.StubOutWithMock(self.br, 'delete_port')
self.br.delete_port('port1')
self.mox.ReplayAll()
self.br.delete_ports(all_ports=True)
self.mox.VerifyAll()
def test_delete_quantum_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
ports = [port1, port2]
self.mox.StubOutWithMock(self.br, 'get_vif_ports')
self.br.get_vif_ports().AndReturn(ports)
self.mox.StubOutWithMock(self.br, 'delete_port')
self.br.delete_port('tap1234')
self.br.delete_port('tap5678')
self.mox.ReplayAll()
self.br.delete_ports(all_ports=False)
self.mox.VerifyAll()
def test_get_bridges(self):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "list-br"],
root_helper=root_helper).AndReturn('br-int\nbr-ex\n')
self.mox.ReplayAll()
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.mox.VerifyAll()
|
|
# thinIt, a JS/CSS minifier written in Python 2.7.12
# ------------ Setup ------------------
import os, sys, re
fileName = ""
jsTextArray = []
jsText = ""
jsMinName = ""
fileType = ""
commentsOpen = False
singleQuoteOpen = False
doubleQuoteOpen = False
madeAChange = True
comment = ""
syntax = '> thinIt.py [fileName] ["optionalCommentText"]'
ver = "v0.1.0"
sizeBefore = 0
sizeAfter = 0
percentSmaller = 0
alphanumerics = re.compile('[A-Za-z0-9_\.$]+')
# ------------ Functions ------------------
def getParams():
# if no file name, display msg
# get file name, also get optional text
global fileName, comment
if len(sys.argv) < 2:
# no parameters
print
print "Please specify a JS or CSS file to minify, in this format:"
showSyntax()
if sys.argv[1].lower() == "help" or sys.argv[1] == "?" or sys.argv[1] == "/?":
print
print "thinIt is a JavaScript and CSS minifier written in Python 2.7.12."
print "To use it, please specify a JS or CSS file to minify, in this format:"
showSyntax()
fileName = sys.argv[1]
# check if parameter is a valid file name
if len(sys.argv) > 2:
comment = sys.argv[2]
def showSyntax():
print
print syntax
print
print " fileName = the JavaScript or CSS file to minify (plus path, if not in the current directory) (in quotes if there are spaces)"
print " optionalCommentText = A comment to insert as the first line, i.e. description or version number (optional, in quotes)"
print
quit()
def clear():
# clear screen
os.system("cls" if os.name == "nt" else "clear")
def getFileType():
fileExt = jsFile.name.split('.')
return fileExt[len(fileExt)-1].lower()
def getMinName():
# get min name - separate name by . then insert ".min" before last one
newNameArray = jsFile.name.split('.')
global jsMinName, fileType
for i in range(0, len(newNameArray)):
if i < (len(newNameArray)-1):
jsMinName = jsMinName + newNameArray[i] + "."
else:
jsMinName = jsMinName + "min." + fileType
def saveMinFile():
# create output min file
outputFile = open(jsMinName, "w")
outputFile.write(jsText)
outputFile.close()
def checkForCommentEnd(thisLine):
global madeAChange, jsTextArray, commentsOpen
for j in range(0, len(thisLine)):
# loop through line, if see "*/" anywhere, end of comments
if thisLine[j:j+2] == "*/":
madeAChange = True
commentsOpen = False
thisLine = thisLine[j+2:]
break
if commentsOpen:
thisLine = ""
return thisLine
def removeLeadingSpace(thisLine):
# check for & remove leading space
global madeAChange
if thisLine[0:1] == " ":
madeAChange = True
return thisLine[1:]
else:
return thisLine
def removeTrailingSpace(thisLine):
# check for & remove trailing space
global madeAChange
if thisLine[len(thisLine)-1:len(thisLine)] == " ":
madeAChange = True
return thisLine[:(len(thisLine)-1)]
else:
return thisLine
def removeLeadingTab(thisLine):
# check for & remove leading tab
global madeAChange
if thisLine[0:1] == chr(9):
madeAChange = True
return thisLine[1:]
else:
return thisLine
def removeTrailingTab(thisLine):
# check for & remove trailing tab
global madeAChange
if thisLine[len(thisLine)-1:len(thisLine)] == chr(9):
madeAChange = True
return thisLine[:(len(thisLine)-1)]
else:
return thisLine
def removeIndentedComment(thisLine, commentChar):
# check for indented Python/VBScript comments & remove them
# this function has to be called after checking for 1st char comment, otherwise those will be skipped
returnLine = ""
for j in range(0, len(thisLine)):
# loop through line, if see "#" after just spaces or tabs, remove the line
if thisLine[j:j+1] == " " or thisLine[j:j+1] == chr(9):
# if char is a space or tab, keep checking
if thisLine[j+1:j+2] == commentChar:
# if found a comment
returnLine = ""
break
else:
continue
else:
# if not a space or tab, skip this line
returnLine = thisLine
break
return returnLine
def removeInternalSpaces(thisLine):
# remove spaces between non-alphanumeric characters; *** for JS, check to be sure not in quotes first; for HTML, check to be sure not a <pre> tag
global madeAChange
if len(thisLine) < 3:
return thisLine
newLine = thisLine[0]
for j in range(1, len(thisLine)-1):
# loop through line, remove unnecessary spaces
if thisLine[j] == " ":
if (alphanumerics.search(thisLine[j-1])) and (alphanumerics.search(thisLine[j+1])):
# if characters on both sides of space are alphanumeric, keep the space
newLine = newLine + thisLine[j]
else:
# else, skip the space
madeAChange = True
continue
else:
newLine = newLine + thisLine[j]
newLine = newLine + thisLine[len(thisLine)-1]
return newLine
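# A quick illustration of the intended behaviour (hypothetical inputs, not from the original script):
#   removeInternalSpaces("a = b + 1;")  ->  "a=b+1;"   (spaces next to '=', '+' and ';' are dropped)
#   removeInternalSpaces("var x")       ->  "var x"    (the space between two alphanumerics is kept)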
def removeInternalJSComments(line):
# remove /* */ comments that may not be at the beginning of the line
global madeAChange, commentsOpen
for j in range(0, len(jsTextArray[line]) - 1):
# loop through line to see if see /*
if jsTextArray[line][j:j+2] == "/*":
commentsOpen = True
madeAChange = True
# call checkforcommentend with string from j onward; if it returns empty string (no comment end) delete line from j onward; else concatenate
# beginning up to j, plus part that was returned
tempString = checkForCommentEnd(jsTextArray[line])
if tempString == "":
# no comment end
return jsTextArray[line][0:j]
else:
# if found comment end
return jsTextArray[line][0:j] + tempString
return jsTextArray[line]
def condenseLines(num):
# for lines that end w/ a letter or number, check the next non-blank char; if it is also alphanumeric, keep a separating space when consolidating the lines
# for: JS, CSS
global jsText
# for making output string, loop through chars & lines after current EOL & make sure that next non-blank char is not alphanumeric
doneChecking = False
for j in range(num + 1, len(jsTextArray)-1):
# loop through rest of the lines until find a non-blank character
for k in range(0, len(jsTextArray[j])-1):
if jsTextArray[j][k] == "" or jsTextArray[j][k] == " " or jsTextArray[j][k] == chr(10) or jsTextArray[j][k] == chr(13):
# if a space or line end
continue
elif alphanumerics.search(jsTextArray[j][k]):
# if it's an alphanumeric character
jsText = jsText + jsTextArray[num] + " "
doneChecking = True
break
else:
# if not alphanumeric character
jsText = jsText + jsTextArray[num]
doneChecking = True
break
if doneChecking:
break
if not doneChecking:
jsText = jsText + jsTextArray[num]
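# Rough illustration of the intent (hypothetical lines, not from the original input):
#   current line "var a" followed by "b = 1;"  -> emitted as "var a " (space kept so the tokens stay separate)
#   current line "return x" followed by "+ y;" -> emitted as "return x" (no space needed before '+')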
def checkForInternalSingleComment(thisLine):
# check for single-line comment somewhere in line other than at beginning
# use fileType to determine if looking for // or # or '
# ** if changing to do this in the while function, have to check to be sure not in quotes (or escaped) first **
for k in range(0, len(thisLine)):
# loop through line looking for comment start
if fileType[0:2] == "py":
if thisLine[k] == "#":
return True
if fileType == "js":
if thisLine[k:k+2] == "//":
return True
if fileType[0:2] == "vb":
if thisLine[k] == "'":
return True
return False
def checkForQuotes(thisLine):
# loop through line, check for single or double opening quote (except VBScript file -- only check for double quotes)
global madeAChange, singleQuoteOpen, doubleQuoteOpen
for j in range(0, len(thisLine)):
if thisLine[j] == "\"":
# *** have to have separate check for closing quotes -- when to do that? ***
pass
def checkforEOLChar(num, eol):
# check line for that language's EOL character; if found, remove it and concatenate that line and the next one
# by saving them both as the i+1 line (so they get checked next time through, and not skipped)
# for JS and Python: \
# for VBScript: _
# --- use chr() instead of escaping backslash ---
pass
def concatHTMLHead():
global jsText
# remove line breaks from all tags before the <body> tag
for i in range(0, len(jsTextArray) - 1):
# search that line for <body -- if found, concatenate all prior lines
if "<body" in jsTextArray[i]:
break
# if not at body yet, remove internal spaces
jsTextArray[i] = removeInternalSpaces(jsTextArray[i])
# loop back through prior lines, concatenate them
for j in range(0, i - 1):
if len(jsTextArray[j]) > 1:
lastChar = jsTextArray[j][len(jsTextArray[j]) - 1] # **** make this into a concatHTMLLines function, call it here & from outputstring func, check for <pre> tags
else:
lastChar = jsTextArray[j][0:1]
if lastChar == ">" or lastChar == " " or lastChar == "<":
jsText = jsText + jsTextArray[j]
else:
jsText = jsText + jsTextArray[j] + " "
return i
def loopThrough():
global commentsOpen, madeAChange, fileType
while madeAChange:
madeAChange = False
commentsOpen = False
for i in range(0, len(jsTextArray)):
# loop through the lines in the file
if jsTextArray[i] == chr(13) or jsTextArray[i] == chr(10) or jsTextArray[i] == "":
# if an empty line, skip it
jsTextArray[i] = ""
continue
if commentsOpen:
# if currently in a comment that could be multi-line
jsTextArray[i] = checkForCommentEnd(jsTextArray[i])
else:
# if not currently in a comment, check for beginning of a comment
if jsTextArray[i][0:2] == "//" and fileType == "js":
madeAChange = True
jsTextArray[i] = ""
continue
if jsTextArray[i][0:1] == "#" and fileType[0:2] == "py":
madeAChange = True
jsTextArray[i] = ""
continue
if jsTextArray[i][0:1] == "'" and fileType[0:2] == "vb":
madeAChange = True
jsTextArray[i] = ""
continue
if jsTextArray[i][0:2] == "/*" and (fileType == "js" or fileType == "css"):
madeAChange = True
commentsOpen = True
jsTextArray[i] = checkForCommentEnd(jsTextArray[i])
continue
jsTextArray[i] = removeTrailingSpace(jsTextArray[i])
jsTextArray[i] = removeTrailingTab(jsTextArray[i])
# language-specific checks
if fileType == "js":
jsTextArray[i] = removeLeadingSpace(jsTextArray[i])
jsTextArray[i] = removeLeadingTab(jsTextArray[i])
jsTextArray[i] = removeInternalJSComments(i)
checkforEOLChar(i, "\\")
#checkForQuotes(jsTextArray[i])
#jsTextArray[i] = removeInternalSpaces(jsTextArray[i])
# --- check to be sure not in quotes first
if fileType == "css":
jsTextArray[i] = removeLeadingSpace(jsTextArray[i])
jsTextArray[i] = removeLeadingTab(jsTextArray[i])
jsTextArray[i] = removeInternalJSComments(i)
jsTextArray[i] = removeInternalSpaces(jsTextArray[i])
# --- check to be sure not in quotes first
if fileType[0:2] == "py":
jsTextArray[i] = removeIndentedComment(jsTextArray[i], "#")
checkforEOLChar(i, "\\")
#jsTextArray[i] = removeInternalSpaces(jsTextArray[i])
# --- check to be sure not in quotes first
if fileType[0:2] == "vb":
jsTextArray[i] = removeIndentedComment(jsTextArray[i], "'")
jsTextArray[i] = removeLeadingSpace(jsTextArray[i])
jsTextArray[i] = removeLeadingTab(jsTextArray[i])
checkforEOLChar(i, "_")
#jsTextArray[i] = removeInternalSpaces(jsTextArray[i])
# --- check to be sure not in quotes first
if fileType[0:2] == "ht":
jsTextArray[i] = removeLeadingSpace(jsTextArray[i])
jsTextArray[i] = removeLeadingTab(jsTextArray[i])
#jsTextArray[i] = removeInternalSpaces(jsTextArray[i])
# --- check to be sure not in <pre> tag first, can run it for everything inside < >
def makeOutputString():
# copy minified text into string
# if do away with this function for performance reasons, just write array to file with "\n", " ", or nothing between lines
global jsText, comment, fileType
if comment <> "":
# if a comment will be added
if fileType == "js":
comment = "// " + comment
if fileType[0:2] == "py":
comment = "#" + comment
if fileType[0:2] == "vb":
comment = "'" + comment
else:
comment = "/* " + comment + " */"
jsText = comment + "\n"
if fileType[0:2] == "ht":
# if HTML or HTA file, check to see where <body> starts; concatenate everything before that
bodyStart = concatHTMLHead()
for i in range(0, len(jsTextArray)):
# put new lines into a string. if ; at end, don't add \n
if jsTextArray[i] == "":
continue
else:
if (fileType == "js" or fileType == "css"):
# if a JS/CSS file
if i < len(jsTextArray)-1:
# if not the last line
if (alphanumerics.search(jsTextArray[i][len(jsTextArray[i])-1])) and (alphanumerics.search(jsTextArray[i+1][0:1])):
# if both are alphanumeric, don't consolidate the lines
if checkForInternalSingleComment(jsTextArray[i]): # this is just until can remove the comments?
jsText = jsText + jsTextArray[i] + "\n"
else:
jsText = jsText + jsTextArray[i] + " "
else:
if alphanumerics.search(jsTextArray[i][len(jsTextArray[i])-1]):
# if last char is alphanumeric but 1st char on next line isn't:
if checkForInternalSingleComment(jsTextArray[i]):
jsText = jsText + jsTextArray[i] + "\n"
else:
condenseLines(i)
else:
# if neither are alphanumeric
jsText = jsText + jsTextArray[i]
else:
# if the last line
jsText = jsText + jsTextArray[len(jsTextArray)-1]
else:
# for other file types
if fileType[0:2] == "ht":
# if HTML / HTA
if (i < bodyStart):
# if not at the <body> tag yet
continue
jsText = jsText + jsTextArray[i] + "\n"
def getSizes():
global sizeBefore, sizeAfter, percentSmaller
sizeBefore = os.stat(fileName).st_size
sizeAfter = os.stat(jsMinName).st_size
percentSmaller = int(100 - ((sizeAfter * 1.00) / sizeBefore * 100))
sizeBefore = "%s %s" % (sizeBefore, "bytes")
sizeAfter = "%s %s" % (sizeAfter, "bytes")
def showResults():
# show before & after file size, new file name
print
print "File minified to: ", jsMinName
print
print "File size before minify: ", sizeBefore
print "File size after minify: ", sizeAfter
print
print "Compressed", str(percentSmaller) + "%"
# ------------ Start ------------------
clear()
getParams()
jsFile = open(fileName, "r")
jsTextArray = jsFile.read().split('\n')
fileType = getFileType()
getMinName()
jsFile.close()
loopThrough()
makeOutputString()
saveMinFile()
getSizes()
showResults()
|
|
#!/usr/bin/python
# app.py
from flask import Flask
from flask import request, render_template
import re
import sys
import pymongo
from pymongo import MongoClient
import urlparse
import requests
import logging
import os
from flask import json
from bson.objectid import ObjectId
import ast # to convert unicode to dict
#import scanForFilms
# coding: utf-8
import paho.mqtt.client as mqtt
#mqtt info:
def mqtt_publish(topic, payload):
host_mqtt = '192.168.1.71'
port_mqtt = 1883 # SSL/TLS = 8883
mqttc = mqtt.Client('python_pub')
mqttc.connect(host_mqtt, port_mqtt)
mqttc.publish(topic, payload)
mqttc.loop(2) #timeout = 2s
return
app = Flask(__name__)
app.logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter('[%(asctime)s] [%(module)s:%(lineno)d][%(levelname)s] %(message)s')
stream_handler.setFormatter(stream_formatter)
app.logger.addHandler(stream_handler)
# global variable (not used at present)
config_file = "config.json"
if ('DB_PORT_27017_TCP_ADDR' in os.environ):
host = os.environ['DB_PORT_27017_TCP_ADDR']
else:
host = '172.17.0.1'
client = MongoClient(host, 27017)
db = client.movies # db = client.primer
def convert(input):
if isinstance(input, dict):
return dict((convert(key), convert(value)) for key, value in input.iteritems())
elif isinstance(input, list):
return [convert(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
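# Example of what convert() does on Python 2 (hypothetical document, not from the database):
#   convert({u'Title': u'Fargo', u'Genre': [u'Crime', u'Drama']})
#   -> {'Title': 'Fargo', 'Genre': ['Crime', 'Drama']}   (all unicode strings become UTF-8 byte strings)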
@app.route('/', methods=["GET"])
def route_getbase():
app.logger.info('/ GET url')
genres, directors, films = getBasicMetadata()
posts = db.movies.find()
return render_template('home.html', genres=genres, directors=directors, posts=films)
# Work in Progress - requires the wsgi container to have visibility of the folders that the videos are in.
@app.route('/movieinfo/scan', methods=["GET"])
def route_getmoviescan():
app.logger.info('/movieinfo/scan GET url')
# Call scanForFilms to scan/add movies to mongodB:
mqtt_topic = 'hello/world'
mqtt_payload = 'scanForFilms'
mqtt_publish(mqtt_topic, mqtt_payload)
# Insert mqtt call to trigger python call:
#scanForFilms.main()
page = 1
pagesize = 25
skip = page * pagesize
posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
return render_template('movieinfoall.html', posts=posts, page=page)
# @app.route('/movieinfo/delete/', methods=["GET"])
# def route_getmoviedelete():
# app.logger.info('/movieinfo/delete GET url')
# empty = db.movies.remove({"Title":""})
# app.logger.info("deleted an item?")
#
# page = 1
# pagesize = 25
# skip = page * pagesize
# posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
#
# return render_template('movieinfoall.html', posts=posts, page=page)
@app.route('/movieinfo/delete/<imdbid>/<page>', methods=["GET"])
def route_getmoviedeleteimdbid(imdbid, page):
app.logger.info('/movieinfo/delete/<imdbid>/<page> GET url')
if imdbid:
app.logger.info(imdbid)
# Remove record:
post = db.movies.delete_one({'_id': ObjectId(imdbid)})
if page:
page = int(page)
else:
page = 1
pagesize = 25
skip = page * pagesize
posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
return render_template('movieinfoall.html', posts=posts, page=page)
@app.route('/movieinfo/all', methods=["GET"])
def route_getmovieinfoall():
app.logger.info('/movieinfo/all GET url')
url = request.values # Get value from GET(/POST) request
page = 1
if 'page' in url:
page = int(url['page'])
pagesize = 25
skip = page * pagesize
app.logger.info(skip)
posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
return render_template('movieinfoall.html', posts=posts, page=page)
@app.route('/movieinfo/film', methods=["GET"])
def route_getmovieinfofilm():
app.logger.info('/movieinfo/film GET url')
url = request.values # Get value from GET(/POST) request
if 'moviename' in url:
search = url['moviename']
# Get matching entries
posts = db.movies.find({'Title': {'$regex': search, "$options": "$i"}}).sort(('Title'), pymongo.DESCENDING)
else:
# Get all entries
posts = db.movies.find().sort(('Title'), pymongo.DESCENDING)
return render_template('movieinfofilm.html', posts=posts)
@app.route('/movieinfo/genre', methods=["GET"])
def route_getmoviegenre():
app.logger.info('/movieinfo/genre GET url')
url = request.values # Get value from GET(/POST) request
genres, directors, posts = getBasicMetadata()
if url.keys(): # Get keys of url and add them to array
genrelist = url.keys()
app.logger.info(genrelist)
search = '|'.join(genrelist)
app.logger.info(search)
posts = db.movies.find({'Genre': {'$regex': search, "$options": "$i"}}).sort(('imdbRating'), pymongo.DESCENDING)
return render_template('movieinfogenre.html', posts=posts, genres=genres)
@app.route('/movieinfo/director', methods=["GET"])
def route_getmoviedirector():
app.logger.info('/movieinfo/director GET url')
url = request.values # Get value from GET(/POST) request
genres, directors, posts = getBasicMetadata()
if 'director' in url:
# Get matching entries
search = url['director']
app.logger.info(search)
posts = db.movies.find({'Director': {'$regex': search, "$options": "$i"}}).sort(('Title'), pymongo.DESCENDING)
else:
# Get all entries
posts = db.movies.find().sort(('Title'), pymongo.DESCENDING)
return render_template('movieinfodirector.html', posts=posts, directors=directors)
@app.route('/movieinfo/imdb', methods=["GET"])
def route_getmovieimdb():
app.logger.info('/movieinfo/imdb GET url')
url = request.values # Get value from GET(/POST) request
# defaults, so the variables referenced below always exist even if a query parameter is missing
operator, imdbrating, sort = "$eq", "0", "DESCENDING"
opt_operator, opt_imdbrating = '', ''
if 'sortby' in url:
if url['sortby'] == "asc":
operator = "$gte"
elif url['sortby'] == "desc":
operator = "$lte"
else:
operator = "$eq"
if 'imdbrating' in url:
imdbrating = url['imdbrating']
if 'optsortby' in url:
opt_operator = ''
if url['optsortby'] == "asc":
opt_operator = "$gte"
elif url['optsortby'] == "desc":
opt_operator = "$lte"
elif url['optsortby'] == "equal":
opt_operator = "$eq"
if opt_operator:
app.logger.info(opt_operator)
else:
app.logger.warn("Not defined!")
if 'optimdbrating' in url:
opt_imdbrating = url['optimdbrating']
app.logger.info(opt_imdbrating)
if 'sort' in url:
sort = url['sort']
app.logger.info(sort)
if opt_operator and opt_imdbrating:
# posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A", opt_operator: opt_imdbrating}}).sort(('imdbRating'), pymongo.DESCENDING).limit(pagesize).skip(page*pagesize)
if sort == "DESCENDING":
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A", opt_operator: opt_imdbrating}}).sort(('imdbRating'), pymongo.DESCENDING)
else:
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A", opt_operator: opt_imdbrating}}).sort(('imdbRating'), pymongo.ASCENDING)
else:
if sort == "DESCENDING":
# posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A"}}).sort(('imdbRating'), pymongo.DESCENDING).limit(pagesize).skip(page*pagesize)
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A"}}).sort(('imdbRating'), pymongo.DESCENDING)
else:
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A"}}).sort(('imdbRating'), pymongo.ASCENDING)
return render_template('movieinfoimdb.html', posts=posts)
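# Example of the query this route builds (hypothetical request; ratings compare as strings, as stored in Mongo):
#   /movieinfo/imdb?sortby=asc&imdbrating=7.0&optsortby=desc&optimdbrating=9.0&sort=DESCENDING
#   -> db.movies.find({"imdbRating": {"$gte": "7.0", "$ne": "N/A", "$lte": "9.0"}})
#      sorted by imdbRating, descending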
@app.route('/movieinfo', methods=["GET"])
def route_getexample():
app.logger.info('/movieinfo GET X url')
#url = request.values # Get value from GET(/POST) request
url = request.args.get('add')
url = url[1:len(url)-1]
url = convert(url)
app.logger.info(url)
app.logger.info(type(url))
if 'moviename' in url:
app.logger.info('moviename found in url')
posts = db.movies.find({"Title": url['moviename']}).sort(('Title'), pymongo.DESCENDING)
found = posts.count()
return render_template('index.html', posts=posts, found=found)
    if url:
        # Use ast to convert the unicode dict string into a real dict.
        moviejson = ast.literal_eval(url)
        app.logger.info(type(moviejson))
        app.logger.info(moviejson)
posts = db.movies.insert_one(moviejson)
posts = db.movies.find({"Title": moviejson['Title']})
found = 1
return render_template('index.html', posts=posts, found=found)
posts = json.dumps({'text': '1234'})
found = 0
return render_template('index.html', posts=posts, found=found)
@app.route('/movieinfo', methods=["POST"])
def route_postexample():
app.logger.info('/movieinfo POST url')
httpsearch = request.form['text']
app.logger.info(httpsearch)
posts = db.movies.find({"Title": httpsearch})
app.logger.info(posts.count())
if posts.count() > 0:
found = 1
return render_template('index.html', posts=posts, found=found)
else:
posts = getmatch(httpsearch)
if posts:
found = "yes"
else:
posts = {"Title": "X-men"} # Dummy data
found = 0
return render_template('index.html', posts=posts, found=found)
@app.route('/image', methods=["GET"])
def route_getimage():
app.logger.info('/image GET url')
genres, directors, films = getBasicMetadata()
moviejson = db.movies.find({"Title": "Fargo"}).limit(1)
app.logger.info(moviejson)
getPoster(moviejson)
posts = db.movies.find()
return render_template('home.html', genres=genres, directors=directors, posts=films)
def getBasicMetadata():
alltype = db.movies.find()
genres = []
directors = []
films = []
for film in alltype:
if "Genre" in film:
genrefile = film['Genre'].split(",")
for i in genrefile:
genres.append(i.strip())
if "Director" in film:
dirfile = film['Director'].split(",")
for i in dirfile:
directors.append(i.strip())
if "Title" in film:
films.append(film['Title'])
gen = list(set(genres))
dirs = list(set(directors))
return gen, dirs, list(set(films))
def getPoster(cursor):
for moviejson in cursor:
app.logger.info(moviejson)
if "Poster" in moviejson:
app.logger.info(moviejson['Poster'])
image = requests.get(moviejson['Poster'])
poster = str(moviejson['Poster'])
index = poster.rfind('.')
ext = poster[index + 1:]
name = str(moviejson['Title'])
            try:
                with open(name + '.' + ext, "wb") as poster_file:
                    poster_file.write(image.content)
            except IOError as e:
                app.logger.warn(e)
return
def getmatch(film):
movielist = []
baseUrl = "http://www.omdbapi.com/"
try:
r = requests.get(baseUrl + "?t="+film+"&y=&plot=long&r=json")
app.logger.info(r.status_code)
moviejson = r.json()
#app.logger.info(moviejson)
if 'Awards' in moviejson:
app.logger.info("Found Awards in moviejson")
del moviejson['Awards']
app.logger.info(moviejson)
except requests.exceptions.RequestException as e:
app.logger.warn(e)
sys.exit(1)
app.logger.info(moviejson)
movielist.append(moviejson)
return movielist # str(db.users.find().pretty())
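# A note on the OMDb call above: requests can build the query string itself, which
# also handles spaces in titles. Equivalent (hypothetical) form of the same request:
#   r = requests.get(baseUrl, params={"t": film, "y": "", "plot": "long", "r": "json"})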
###########################################
# WIP Stuff
###########################################
def writeConfig(json_to_write):
with open(config_file, mode='w') as out:
        json.dump(
            json_to_write,
            out,
            sort_keys=True,
            indent=4,
            separators=(',', ': '))
    return
def readConfig():
with open(config_file, mode='r') as out:
input_json = json.load(out)
return input_json
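# Round-trip sketch for the two helpers above (config_file is assumed to be defined
# elsewhere in this module):
#   writeConfig({'pagesize': 20})
#   assert readConfig()['pagesize'] == 20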
@app.route('/options', methods=["GET"])
def route_getoptions():
app.logger.info('/options GET url')
genres, directors, posts = getBasicMetadata()
url = request.values # Get value from GET(/POST) request
posts = {"Title": "X-men"}
app.logger.info(url)
if len(url) == 1:
query = {}
        value = list(url.values())  # Get values from dict
query['Genre'] = value[0]
posts = db.movies.find(query).sort(('imdbRating'), pymongo.DESCENDING)
app.logger.info(value[0])
else:
query = []
for u in url:
querydict = {}
querydict['Genre'] = url[u]
query.append(querydict)
app.logger.info(query)
posts = db.movies.find({'$or': query}).sort(('imdbRating'), pymongo.DESCENDING)
app.logger.info(posts)
page = 1
if 'page' in url:
page = int(url['page'])
pagesize = 20
if 'pagesize' in url:
        pagesize = int(url['pagesize'])
#posts = db.movies.find({"Genre": "Adventure"})
posts = db.movies.find().sort(('Title'), pymongo.DESCENDING).limit(pagesize).skip(page*pagesize)
#for f in posts:
# app.logger.info(f)
# result = db.test.delete_one({'x': 1})
# directors = getDirector()
return render_template('displayOptions.html', genres=genres, directors=directors, posts=posts, page=page, pagesize=pagesize)
@app.route('/options', methods=["POST"])
def route_postoptions():
app.logger.info('/options POST url')
text1 = request.form['0']
app.logger.info(text1)
    genres, directors, films = getBasicMetadata()
    # directors = getDirector()
return render_template('displayOptions.html', genres=genres, directors=directors)
# List of reference accesses via pymongo that I've tried:
# posts = db.movies.find({'Title': '/.*Sup.*/'})
# posts = db.movies.find({"Genre": {"$elemMatch": {"$eq": "Action", "$eq": "Comedy"}}})
# posts = db.movies.find({"$or": [{"Genre": {"$in": genrelist}}]})
# posts = db.movies.find({"$where": 'function() {var genre = this.Genre.split(","); for (i = 0; i < genre.length; i++) { if (genre == "Action") return this.genre; } }'})
# db.inventory.find( { $or: [ { quantity: { $lt: 20 } }, { price: 10 } ] })
# posts = db.movies.find({"Genre": "Action, Adventure, Drama"})
# posts = db.movies.find({"Genre": { $elemMatch: {"$in": genrelist}}})
# posts = db.movies.find({"Genre": {"$elemMatch": {"Genre": genrelist}}})
# posts = db.movies.find()
# posts = db.movies.find({"Genre": { "$in": genrelist}})
# posts = db.movies.find({"Genre": { "$in": genrelist}})
# posts = db.movies.find({"Genre": { $elemMatch: {"$in": genrelist}}})
# posts = db.movies.find()
# resultdb = db.movies.insert_one(moviejson)
# moviejson = db.movies.find({"Title": "Fargo"}).limit(1)\
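# A working variant of the genre lookups attempted above. Genres are stored as one
# comma-separated string per movie ("Action, Adventure, Drama"), so a case-insensitive
# regex OR is the simplest match. The helper below is a hypothetical sketch and is not
# referenced elsewhere in this app.
def _build_genre_query(genrelist):
    import re  # local import; only needed for this sketch
    pattern = '|'.join(re.escape(g) for g in genrelist)
    return {'Genre': {'$regex': pattern, '$options': 'i'}}
# e.g. db.movies.find(_build_genre_query(['Action', 'Comedy']))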
def getlink(full_path_file_name, return_type):
    # Split on the last '/'; when there is no '/', the whole string is the
    # filename and the path is ''.
    path, _, filename = full_path_file_name.rpartition('/')
    if return_type == "filename":
        return filename
    return path
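# Illustrative behaviour of getlink (example values are hypothetical):
#   getlink('static/posters/Fargo.jpg', 'filename')  -> 'Fargo.jpg'
#   getlink('static/posters/Fargo.jpg', 'path')      -> 'static/posters'
#   getlink('Fargo.jpg', 'path')                     -> ''
# os.path.split() gives the same split for OS-native paths.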
|
|
#!/usr/bin/env python
from google.appengine.ext.webapp import template
from google.appengine.ext import ndb
from google.appengine.api import mail
import cgi
import logging
import os.path
import webapp2
import models
from webapp2_extras import auth
from webapp2_extras import sessions
from webapp2_extras.auth import InvalidAuthIdError
from webapp2_extras.auth import InvalidPasswordError
current_sem = 1
registration_open = True
def user_required(handler):
"""
Decorator that checks if there's a user associated with the current session.
Will also fail if there's no session present.
"""
def check_login(self, *args, **kwargs):
auth = self.auth
if not auth.get_user_by_session():
self.redirect(self.uri_for('login'), abort=True)
else:
return handler(self, *args, **kwargs)
return check_login
def admin_required(handler):
"""
Decorator that checks if there's a user associated with the current session.
Will also fail if there's no session present.
"""
def check_login(self, *args, **kwargs):
auth = self.auth.get_user_by_session()
if not auth or auth['role']!='admin':
self.redirect(self.uri_for('login'), abort=True)
else:
return handler(self, *args, **kwargs)
return check_login
def faculty_required(handler):
"""
Decorator that checks if there's a user associated with the current session.
Will also fail if there's no session present.
"""
def check_login(self, *args, **kwargs):
auth = self.auth.get_user_by_session()
if not auth or auth['role']!='faculty':
self.redirect(self.uri_for('login'), abort=True)
else:
return handler(self, *args, **kwargs)
return check_login
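# The three decorators above differ only in the role string they test. A factory like
# the sketch below could generate them; it is illustrative only, and the handlers in
# this module keep using the explicit decorators.
def role_required(role):
    def wrapper(handler):
        def check_login(self, *args, **kwargs):
            user = self.auth.get_user_by_session()
            if not user or user['role'] != role:
                self.redirect(self.uri_for('login'), abort=True)
            else:
                return handler(self, *args, **kwargs)
        return check_login
    return wrapper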
class BaseHandler(webapp2.RequestHandler):
@webapp2.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__),'..', 'templates', view_filename)
self.response.out.write(template.render(path, params))
def display_message(self, message, role="admin"):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
if(role=="faculty"):
self.render_template('faculty/message.html',params)
elif(role=="student"):
self.render_template('student/message.html',params)
else:
self.render_template('message.html', params)
def display_popup(self, params):
#params = {"message":message}
self.render_template('popup.html',params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
self.render_template('main.html')
class ForgotPasswordHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
user_id = user.get_id()
email = user.email_address
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
        msg = 'An email has been sent to the user so they can reset their password. \
They will be able to do so by visiting <a href="{url}">{url}</a>'
sender_address = "deepakkoli93@gmail.com"
subject = "Change your password"
body = """Thank you for creating an account! Please change your password by clicking on the link below:%s""" % verification_url
mail.send_mail(sender_address, email, subject, body)
        self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def post(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
save_session=True)
user_data = self.user
logging.info('user data is %s' %user_data)
#self.display_message('Welcome %s' %u['name'] )
#self.display_message('you are %s' %user_data.role)
if(user_data.role=='admin'):
self.redirect(self.uri_for('admin'))
if(user_data.role=='student'):
self.redirect(self.uri_for('student'))
if(user_data.role=='faculty'):
self.redirect(self.uri_for('faculty'))
#self.render_template('admin.html',False)
#if(u['role']=='student'):
#self.display_message('you are a student')
#self.redirect(self.uri_for('home'))
except (InvalidAuthIdError, InvalidPasswordError) as e:
logging.info('Login failed for user %s because of %s', username, type(e))
self._serve_page(True)
def _serve_page(self, failed=False):
username = self.request.get('username')
params = {
'username': username,
'failed': failed
}
self.render_template('login.html', params)
class LogoutHandler(BaseHandler):
def get(self):
self.auth.unset_session()
self.redirect(self.uri_for('main'))
class AuthenticatedHandler(BaseHandler):
@user_required
def get(self):
self.render_template('authenticated.html')
class GodmodeHandler(BaseHandler):
@admin_required
def get(self):
stat = models.Registration_status(open=True, id="registration_status")
stat.put()
sem = models.Semester(semester = "I sem 2015-2016", id="current_sem")
sem.put()
self.display_message('registration status put as true and semester entity created')
config = {
'webapp2_extras.auth': {
'user_model': 'models.User',
'user_attributes': ['name','role']
},
'webapp2_extras.sessions': {
'secret_key': 'YOUR_SECRET_KEY'
}
}
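# Hedged sketch of how these handlers are typically wired up. The real WSGIApplication
# may be created elsewhere, so this stays commented out; route names match the
# uri_for() calls used in the handlers above.
# app = webapp2.WSGIApplication([
#     webapp2.Route('/', MainHandler, name='main'),
#     webapp2.Route('/login', LoginHandler, name='login'),
#     webapp2.Route('/logout', LogoutHandler, name='logout'),
#     webapp2.Route('/forgot', ForgotPasswordHandler, name='forgot'),
#     webapp2.Route('/password', SetPasswordHandler),
#     webapp2.Route(r'/<type:v|p>/<user_id:\d+>-<signup_token:.+>',
#                   handler=VerificationHandler, name='verification'),
# ], debug=True, config=config)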
|
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
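# Worked example of the arrays returned above (hand-checked; consistent with the
# roc_curve doctest below). For y_true = [0, 0, 1, 1], y_score = [0.1, 0.4, 0.35, 0.8]
# and pos_label = 1:
#   thresholds = [0.8, 0.4, 0.35, 0.1]
#   tps        = [1,   1,   2,    2  ]   positives with score >= threshold
#   fps        = [0,   1,   1,    2  ]   negatives with score >= threshold
# so roc_curve gives fpr = fps / fps[-1] = [0, 0.5, 0.5, 1] and
# tpr = tps / tps[-1] = [0.5, 0.5, 1, 1].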
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay.analysis import free_vars, free_type_vars, assert_alpha_equal
from tvm.relay import create_executor, transform
from tvm.relay.transform import gradient
from tvm.relay.prelude import Prelude
from tvm.relay.testing import add_nat_definitions, make_nat_expr, run_infer_type, check_grad, rand
import tvm.relay.op as op
def test_id():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func, mode="first_order"))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy()))
def test_add():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x + x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), 2 * x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), 2 * np.ones_like(x.asnumpy()))
def test_check_grad():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
func = relay.Function([x, y], x + y)
check_grad(func)
def test_temp_add():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = x + x
func = relay.Function([x], y + y)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
def test_sub():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x - x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy()))
tvm.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy()))
def test_broadcast_add():
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = 'float32'
x_nd = rand(dtype, *shape1)
y_nd = rand(dtype, *shape2)
x_np = x_nd.asnumpy()
y_np = y_nd.asnumpy()
expected_forward = x_np + y_np
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
func = relay.Function([x, y], x + y)
func = run_infer_type(func)
full_func = run_infer_type(gradient(func))
assert full_func.checked_type == relay.FuncType([t1, t2],
relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
relay.TupleType([t1, t2])]))
ex = create_executor()
forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.asnumpy(),
np.ones_like(expected_forward).sum(axis=2, keepdims=True))
tvm.testing.assert_allclose(grad_y.asnumpy(),
np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
def test_broadcast_subtract():
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = 'float32'
x_nd = rand(dtype, *shape1)
y_nd = rand(dtype, *shape2)
x_np = x_nd.asnumpy()
y_np = y_nd.asnumpy()
expected_forward = x_np - y_np
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
func = relay.Function([x, y], x - y)
func = run_infer_type(func)
full_func = run_infer_type(gradient(func))
assert full_func.checked_type == relay.FuncType([t1, t2],
relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
relay.TupleType([t1, t2])]))
ex = create_executor()
forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.asnumpy(),
np.ones_like(expected_forward).sum(axis=2, keepdims=True))
tvm.testing.assert_allclose(grad_y.asnumpy(),
-np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
def test_tuple():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
z = relay.var("z", t)
tup = relay.Var("tup")
func = relay.Function([x, y, z], relay.Let(tup, relay.Tuple([x, y, z]),
relay.TupleGetItem(tup, 0) +
relay.TupleGetItem(tup, 1) -
relay.TupleGetItem(tup, 2)))
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
x_nd = rand(dtype, *shape)
y_nd = rand(dtype, *shape)
z_nd = rand(dtype, *shape)
x_np = x_nd.asnumpy()
y_np = y_nd.asnumpy()
z_np = z_nd.asnumpy()
expected_forward = x_np + y_np - z_np
ex = create_executor()
forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd)
tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
def test_pow():
mod = relay.Module()
p = Prelude(mod)
add_nat_definitions(p)
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
double = relay.Function([x], x + x)
i = relay.var("i", t)
func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i))
mod["main"] = func
mod["main"] = gradient(mod["main"], mod=mod)
m = transform.InferType()(mod)
back_func = m["main"]
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
i_nd = rand(dtype, *shape)
ex = create_executor(mod=mod)
forward, (grad_i,) = ex.evaluate(back_func)(i_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
tvm.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy()))
def test_ref():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
r = relay.Var("r")
u = relay.Var("u")
body = relay.RefRead(r)
body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body)
body = relay.Let(r, relay.RefCreate(x), body)
func = relay.Function([x], body)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x_nd = rand(dtype, *shape)
ex = create_executor()
forward, (grad_x,) = ex.evaluate(back_func)(x_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
def test_square_second_order():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x * x)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
y = relay.var("y", t)
back_func_adjusted = relay.Function([y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0))
back_func_adjusted = run_infer_type(back_func_adjusted)
back_back_func = run_infer_type(gradient(back_func_adjusted))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x_nd = rand(dtype, *shape)
ex = create_executor()
forward, (grad_x,) = ex.evaluate(back_back_func)(x_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
def test_if():
x = relay.var("x", shape=(1, 16, 64, 64))
y = relay.var("y", shape=(1, 16, 64, 64))
cond = relay.var("cond", shape=(), dtype='uint1')
net = relay.If(cond, x, y)
net = relay.log(net)
func = relay.Function(free_vars(net), net)
func = run_infer_type(func)
net = gradient(func, mode='higher_order')
net = run_infer_type(net)
def test_grad_tuple():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = x + x
func = relay.Function([x], relay.Tuple([y + y, y]))
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([relay.TupleType([t, t]), relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
(forward_four, forward_two), (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward_four.asnumpy(), 4 * x.asnumpy())
tvm.testing.assert_allclose(forward_two.asnumpy(), 2 * x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
def test_concat():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
rt = relay.TensorType((10, 20), dtype)
x = relay.var("x", t)
y = op.concatenate([x, x], axis=1)
func = relay.Function([x], y)
func = run_infer_type(func)
back_func = run_infer_type(gradient(func))
assert_alpha_equal(back_func.checked_type, relay.FuncType([t], relay.TupleType([rt, relay.TupleType([t])])))
# no value validation as concatenate has dummy gradient right now.
if __name__ == "__main__":
test_id()
test_add()
test_temp_add()
test_sub()
test_broadcast_add()
test_broadcast_subtract()
test_tuple()
test_pow()
test_ref()
test_square_second_order()
test_if()
test_grad_tuple()
|
|
# step 1. imports
from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
String, ForeignKey, Float, DateTime, event)
from sqlalchemy.orm import sessionmaker, mapper, relationship
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.sql import operators, visitors
import datetime
# step 2. databases.
# db1 is used for id generation. The "pool_threadlocal"
# causes the id_generator() to use the same connection as that
# of an ongoing transaction within db1.
echo = True
db1 = create_engine('sqlite://', echo=echo, pool_threadlocal=True)
db2 = create_engine('sqlite://', echo=echo)
db3 = create_engine('sqlite://', echo=echo)
db4 = create_engine('sqlite://', echo=echo)
# step 3. create session function. this binds the shard ids
# to databases within a ShardedSession and returns it.
create_session = sessionmaker(class_=ShardedSession)
create_session.configure(shards={
'north_america':db1,
'asia':db2,
'europe':db3,
'south_america':db4
})
# step 4. table setup.
meta = MetaData()
# we need a way to create identifiers which are unique across all
# databases. one easy way would be to just use a composite primary key, where one
# value is the shard id. but here, we'll show something more "generic", an
# id generation function. we'll use a simplistic "id table" stored in database
# #1. Any other method will do just as well; UUID, hilo, application-specific, etc.
ids = Table('ids', meta,
Column('nextid', Integer, nullable=False))
def id_generator(ctx):
# in reality, might want to use a separate transaction for this.
c = db1.connect()
nextid = c.execute(ids.select(for_update=True)).scalar()
c.execute(ids.update(values={ids.c.nextid : ids.c.nextid + 1}))
return nextid
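# Hedged sketch of the "separate transaction" variant mentioned above: the same
# counter table, but the read-and-increment runs in its own short transaction
# instead of riding the pooled connection. Kept commented out; the simple version
# above is the one actually used.
# def id_generator(ctx):
#     with db1.begin() as conn:
#         nextid = conn.execute(ids.select(for_update=True)).scalar()
#         conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1}))
#         return nextid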
# table setup. we'll store a lead table of continents/cities,
# and a secondary table storing locations.
# a particular row will be placed in the database whose shard id corresponds to the
# 'continent'. in this setup, secondary rows in 'weather_reports' will
# be placed in the same DB as that of the parent, but this can be changed
# if you're willing to write more complex sharding functions.
weather_locations = Table("weather_locations", meta,
Column('id', Integer, primary_key=True, default=id_generator),
Column('continent', String(30), nullable=False),
Column('city', String(50), nullable=False)
)
weather_reports = Table("weather_reports", meta,
Column('id', Integer, primary_key=True),
Column('location_id', Integer, ForeignKey('weather_locations.id')),
Column('temperature', Float),
Column('report_time', DateTime, default=datetime.datetime.now),
)
# create tables
for db in (db1, db2, db3, db4):
meta.drop_all(db)
meta.create_all(db)
# establish initial "id" in db1
db1.execute(ids.insert(), nextid=1)
# step 5. define sharding functions.
# we'll use a straight mapping of a particular set of "country"
# attributes to shard id.
shard_lookup = {
'North America':'north_america',
'Asia':'asia',
'Europe':'europe',
'South America':'south_america'
}
def shard_chooser(mapper, instance, clause=None):
"""shard chooser.
looks at the given instance and returns a shard id
note that we need to define conditions for
the WeatherLocation class, as well as our secondary Report class which will
point back to its WeatherLocation via its 'location' attribute.
"""
if isinstance(instance, WeatherLocation):
return shard_lookup[instance.continent]
else:
return shard_chooser(mapper, instance.location)
def id_chooser(query, ident):
"""id chooser.
given a primary key, returns a list of shards
to search. here, we don't have any particular information from a
pk so we just return all shard ids. often, you'd want to do some
kind of round-robin strategy here so that requests are evenly
distributed among DBs.
"""
return ['north_america', 'asia', 'europe', 'south_america']
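# Hedged sketch of the round-robin strategy the docstring mentions; not wired into
# create_session(), which keeps the simple search-all id_chooser above.
from itertools import cycle
_all_shards = ['north_america', 'asia', 'europe', 'south_america']
_shard_cycle = cycle(_all_shards)
def round_robin_id_chooser(query, ident):
    # start the search at a rotating shard, then fall back to the rest
    first = next(_shard_cycle)
    return [first] + [s for s in _all_shards if s != first]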
def query_chooser(query):
"""query chooser.
this also returns a list of shard ids, which can
just be all of them. but here we'll search into the Query in order
to try to narrow down the list of shards to query.
"""
ids = []
# we'll grab continent names as we find them
# and convert to shard ids
for column, operator, value in _get_query_comparisons(query):
# "shares_lineage()" returns True if both columns refer to the same
# statement column, adjusting for any annotations present.
# (an annotation is an internal clone of a Column object
# and occur when using ORM-mapped attributes like
# "WeatherLocation.continent"). A simpler comparison, though less accurate,
# would be "column.key == 'continent'".
if column.shares_lineage(weather_locations.c.continent):
if operator == operators.eq:
ids.append(shard_lookup[value])
elif operator == operators.in_op:
ids.extend(shard_lookup[v] for v in value)
if len(ids) == 0:
return ['north_america', 'asia', 'europe', 'south_america']
else:
return ids
def _get_query_comparisons(query):
"""Search an orm.Query object for binary expressions.
Returns expressions which match a Column against one or more
literal values as a list of tuples of the form
(column, operator, values). "values" is a single value
or tuple of values depending on the operator.
"""
binds = {}
clauses = set()
comparisons = []
def visit_bindparam(bind):
# visit a bind parameter.
# check in _params for it first
if bind.key in query._params:
value = query._params[bind.key]
elif bind.callable:
# some ORM functions (lazy loading)
# place the bind's value as a
            # callable for deferred evaluation.
value = bind.callable()
else:
# just use .value
value = bind.value
binds[bind] = value
def visit_column(column):
clauses.add(column)
def visit_binary(binary):
# special handling for "col IN (params)"
if binary.left in clauses and \
binary.operator == operators.in_op and \
hasattr(binary.right, 'clauses'):
comparisons.append(
(binary.left, binary.operator,
tuple(binds[bind] for bind in binary.right.clauses)
)
)
elif binary.left in clauses and binary.right in binds:
comparisons.append(
(binary.left, binary.operator,binds[binary.right])
)
elif binary.left in binds and binary.right in clauses:
comparisons.append(
(binary.right, binary.operator,binds[binary.left])
)
# here we will traverse through the query's criterion, searching
# for SQL constructs. We will place simple column comparisons
# into a list.
if query._criterion is not None:
visitors.traverse_depthfirst(query._criterion, {},
{'bindparam':visit_bindparam,
'binary':visit_binary,
'column':visit_column
}
)
return comparisons
# further configure create_session to use these functions
create_session.configure(
shard_chooser=shard_chooser,
id_chooser=id_chooser,
query_chooser=query_chooser
)
# step 6. mapped classes.
class WeatherLocation(object):
def __init__(self, continent, city):
self.continent = continent
self.city = city
class Report(object):
def __init__(self, temperature):
self.temperature = temperature
# step 7. mappers
mapper(WeatherLocation, weather_locations, properties={
'reports':relationship(Report, backref='location')
})
mapper(Report, weather_reports)
# step 8 (optional), events. The "shard_id" is placed
# in the QueryContext where it can be intercepted and associated
# with objects, if needed.
def add_shard_id(instance, ctx):
instance.shard_id = ctx.attributes["shard_id"]
event.listen(WeatherLocation, "load", add_shard_id)
event.listen(Report, "load", add_shard_id)
# save and load objects!
tokyo = WeatherLocation('Asia', 'Tokyo')
newyork = WeatherLocation('North America', 'New York')
toronto = WeatherLocation('North America', 'Toronto')
london = WeatherLocation('Europe', 'London')
dublin = WeatherLocation('Europe', 'Dublin')
brasilia = WeatherLocation('South America', 'Brasilia')
quito = WeatherLocation('South America', 'Quito')
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
quito.reports.append(Report(85))
sess = create_session()
for c in [tokyo, newyork, toronto, london, dublin, brasilia, quito]:
sess.add(c)
sess.commit()
tokyo_id = tokyo.id
sess.close()
t = sess.query(WeatherLocation).get(tokyo_id)
assert t.city == tokyo.city
assert t.reports[0].temperature == 80.0
north_american_cities = sess.query(WeatherLocation).filter(WeatherLocation.continent == 'North America')
assert [c.city for c in north_american_cities] == ['New York', 'Toronto']
asia_and_europe = sess.query(WeatherLocation).filter(WeatherLocation.continent.in_(['Europe', 'Asia']))
assert set([c.city for c in asia_and_europe]) == set(['Tokyo', 'London', 'Dublin'])
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpRetryOperations(object):
"""HttpRetryOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head408(
self, custom_headers=None, raw=False, **operation_config):
"""Return 408 status code, then 200 after retry.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/408'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 500 status code, then 200 after retry.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 500 status code, then 200 after retry.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get502(
self, custom_headers=None, raw=False, **operation_config):
"""Return 502 status code, then 200 after retry.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/502'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 503 status code, then 200 after retry.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 503 status code, then 200 after retry.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put504(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 504 status code, then 200 after retry.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch504(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Return 504 status code, then 200 after retry.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
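# Usage sketch (an assumption, not part of this generated module): these
# operations are normally reached through the generated service client, and
# msrest's pipeline performs the actual retries until the 200 arrives, e.g.
#
#   client = AutoRestHttpRetryTestService(base_url='http://localhost:3000')
#   client.http_retry.head408()                  # 408 first, then 200
#   client.http_retry.put504(boolean_value=True)
#
# The client class and attribute names above are illustrative only.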
|
|
"""
kombu.transport.redis
=====================
Redis transport.
"""
from __future__ import absolute_import
from __future__ import with_statement
from bisect import bisect
from contextlib import contextmanager
from time import time
from Queue import Empty
from anyjson import loads, dumps
from kombu.exceptions import (
InconsistencyError,
StdConnectionError,
StdChannelError,
VersionMismatch,
)
from kombu.log import get_logger
from kombu.utils import cached_property, uuid
from kombu.utils.eventio import poll, READ, ERR
try:
from billiard.util import register_after_fork
except ImportError:
try:
from multiprocessing.util import register_after_fork # noqa
except ImportError:
def register_after_fork(*args, **kwargs): # noqa
pass
try:
import redis
except ImportError:
redis = None # noqa
from . import virtual
logger = get_logger('kombu.transport.redis')
DEFAULT_PORT = 6379
DEFAULT_DB = 0
PRIORITY_STEPS = [0, 3, 6, 9]
# This implementation may seem overly complex, but I assure you there is
# a good reason for doing it this way.
#
# Consuming from several connections enables us to emulate channels,
# which means we can have different service guarantees for individual
# channels.
#
# So we need to consume messages from multiple connections simultaneously,
# and using epoll means we don't have to do so using multiple threads.
#
# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout
# exchanges (broadcast), as an alternative to pushing messages to fanout-bound
# queues manually.
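# A rough sketch of the fanout idea described above (illustrative only, not
# part of the transport): publishing to a fanout exchange is a plain Redis
# PUBLISH, and consuming a fanout-bound queue is a SUBSCRIBE on that channel:
#
#   r = redis.StrictRedis()
#   r.publish('news', dumps({'hello': 'world'}))   # compare _put_fanout()
#   ps = r.pubsub()
#   ps.subscribe('news')                           # compare _subscribe()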
class MutexHeld(Exception):
pass
@contextmanager
def Mutex(client, name, expire):
lock_id = uuid()
if client.setnx(name, lock_id):
client.expire(name, expire)
yield
else:
if not client.ttl(name):
client.expire(name, expire)
raise MutexHeld()
pipe = client.pipeline(True)
try:
pipe.watch(name)
if pipe.get(name) == lock_id:
pipe.multi()
pipe.delete(name)
pipe.execute()
pipe.unwatch()
except redis.WatchError:
pass
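# Usage sketch for Mutex (the worker function below is a hypothetical
# placeholder): hold a short-lived lock shared between clients, giving up
# immediately if another client already owns it:
#
#   try:
#       with Mutex(client, 'unacked_mutex', expire=300):
#           restore_messages()   # critical section
#   except MutexHeld:
#       pass                     # held elsewhere; try again on the next pass
#
# QoS.restore_visible() below uses exactly this pattern.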
class QoS(virtual.QoS):
restore_at_shutdown = True
def __init__(self, *args, **kwargs):
super(QoS, self).__init__(*args, **kwargs)
self._vrestore_count = 0
def append(self, message, delivery_tag):
delivery = message.delivery_info
EX, RK = delivery['exchange'], delivery['routing_key']
with self.pipe_or_acquire() as pipe:
pipe.zadd(self.unacked_index_key, delivery_tag, time()) \
.hset(self.unacked_key, delivery_tag,
dumps([message._raw, EX, RK])) \
.execute()
super(QoS, self).append(message, delivery_tag)
def restore_unacked(self):
for tag in self._delivered:
self.restore_by_tag(tag)
self._delivered.clear()
def ack(self, delivery_tag):
self._remove_from_indices(delivery_tag).execute()
super(QoS, self).ack(delivery_tag)
def reject(self, delivery_tag, requeue=False):
self.ack(delivery_tag)
@contextmanager
def pipe_or_acquire(self, pipe=None):
if pipe:
yield pipe
else:
with self.channel.conn_or_acquire() as client:
yield client.pipeline()
def _remove_from_indices(self, delivery_tag, pipe=None):
with self.pipe_or_acquire(pipe) as pipe:
return pipe.zrem(self.unacked_index_key, delivery_tag) \
.hdel(self.unacked_key, delivery_tag)
def restore_visible(self, start=0, num=10, interval=10):
self._vrestore_count += 1
if (self._vrestore_count - 1) % interval:
return
with self.channel.conn_or_acquire() as client:
ceil = time() - self.visibility_timeout
try:
with Mutex(client, self.unacked_mutex_key,
self.unacked_mutex_expire):
visible = client.zrevrangebyscore(
self.unacked_index_key, ceil, 0,
start=num and start, num=num, withscores=True)
for tag, score in visible or []:
self.restore_by_tag(tag, client)
except MutexHeld:
pass
def restore_by_tag(self, tag, client=None):
with self.channel.conn_or_acquire(client) as client:
p, _, _ = self._remove_from_indices(tag,
client.pipeline().hget(self.unacked_key, tag)).execute()
if p:
M, EX, RK = loads(p)
self.channel._do_restore_message(M, EX, RK, client)
@cached_property
def unacked_key(self):
return self.channel.unacked_key
@cached_property
def unacked_index_key(self):
return self.channel.unacked_index_key
@cached_property
def unacked_mutex_key(self):
return self.channel.unacked_mutex_key
@cached_property
def unacked_mutex_expire(self):
return self.channel.unacked_mutex_expire
@cached_property
def visibility_timeout(self):
return self.channel.visibility_timeout
class MultiChannelPoller(object):
eventflags = READ | ERR
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map.
self._fd_to_chan = {}
# channel -> socket map
self._chan_to_sock = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
def close(self):
for fd in self._chan_to_sock.itervalues():
try:
self.poller.unregister(fd)
except KeyError:
pass
self._channels.clear()
self._fd_to_chan.clear()
self._chan_to_sock.clear()
self.poller = None
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
def _register(self, channel, client, type):
if (channel, client, type) in self._chan_to_sock:
self._unregister(channel, client, type)
if client.connection._sock is None: # not connected yet.
client.connection.connect()
sock = client.connection._sock
self._fd_to_chan[sock.fileno()] = (channel, type)
self._chan_to_sock[(channel, client, type)] = sock
self.poller.register(sock, self.eventflags)
def _unregister(self, channel, client, type):
self.poller.unregister(self._chan_to_sock[(channel, client, type)])
def _register_BRPOP(self, channel):
"""enable BRPOP mode for channel."""
ident = channel, channel.client, 'BRPOP'
if channel.client.connection._sock is None or \
ident not in self._chan_to_sock:
channel._in_poll = False
self._register(*ident)
if not channel._in_poll: # send BRPOP
channel._brpop_start()
def _register_LISTEN(self, channel):
"""enable LISTEN mode for channel."""
if channel.subclient.connection._sock is None:
channel._in_listen = False
self._register(channel, channel.subclient, 'LISTEN')
if not channel._in_listen:
channel._subscribe() # send SUBSCRIBE
def on_poll_start(self):
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
def on_poll_init(self, poller):
self.poller = poller
for channel in self._channels:
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def on_poll_empty(self):
for channel in self._channels:
if channel.active_queues:
# only need to do this once, as they are not local to channel.
return channel.qos.restore_visible(
num=channel.unacked_restore_limit,
)
def handle_event(self, fileno, event):
if event & READ:
chan, type = self._fd_to_chan[fileno]
if chan.qos.can_consume():
return chan.handlers[type](), self
elif event & ERR:
chan, type = self._fd_to_chan[fileno]
chan._poll_error(type)
def get(self, timeout=None):
for channel in self._channels:
if channel.active_queues: # BRPOP mode?
if channel.qos.can_consume():
self._register_BRPOP(channel)
if channel.active_fanout_queues: # LISTEN mode?
self._register_LISTEN(channel)
events = self.poller.poll(timeout)
for fileno, event in events or []:
ret = self.handle_event(fileno, event)
if ret:
return ret
# - no new data, so try to restore messages.
# - reset active redis commands.
self.on_poll_empty()
raise Empty()
@property
def fds(self):
return self._fd_to_chan
class Channel(virtual.Channel):
QoS = QoS
_client = None
_subclient = None
supports_fanout = True
keyprefix_queue = '_kombu.binding.%s'
sep = '\x06\x16'
_in_poll = False
_in_listen = False
_fanout_queues = {}
unacked_key = 'unacked'
unacked_index_key = 'unacked_index'
unacked_mutex_key = 'unacked_mutex'
unacked_mutex_expire = 300 # 5 minutes
unacked_restore_limit = None
visibility_timeout = 3600 # 1 hour
priority_steps = PRIORITY_STEPS
max_connections = 10
_pool = None
from_transport_options = (virtual.Channel.from_transport_options
+ ('unacked_key',
'unacked_index_key',
'unacked_mutex_key',
'unacked_mutex_expire',
'visibility_timeout',
'unacked_restore_limit',
'max_connections',
'priority_steps'))
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
self._queue_cycle = []
self.Client = self._get_client()
self.ResponseError = self._get_response_error()
self.active_fanout_queues = set()
self.auto_delete_queues = set()
self._fanout_to_queue = {}
self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}
# Evaluate connection.
try:
self.client.info()
except Exception:
if self._pool:
self._pool.disconnect()
raise
self.connection.cycle.add(self) # add to channel poller.
        # copy errors, in case the channel is closed but threads
        # are still waiting for data.
self.connection_errors = self.connection.connection_errors
register_after_fork(self, self._after_fork)
def _after_fork(self):
if self._pool is not None:
self._pool.disconnect()
def _do_restore_message(self, payload, exchange, routing_key, client=None):
with self.conn_or_acquire(client) as client:
try:
try:
payload['headers']['redelivered'] = True
except KeyError:
pass
for queue in self._lookup(exchange, routing_key):
client.lpush(queue, dumps(payload))
except Exception:
logger.critical('Could not restore message: %r', payload,
exc_info=True)
def _restore(self, message, payload=None):
tag = message.delivery_tag
with self.conn_or_acquire() as client:
P, _ = client.pipeline() \
.hget(self.unacked_key, tag) \
.hdel(self.unacked_key, tag) \
.execute()
if P:
M, EX, RK = loads(P)
self._do_restore_message(M, EX, RK, client)
def _next_delivery_tag(self):
return uuid()
def basic_consume(self, queue, *args, **kwargs):
if queue in self._fanout_queues:
exchange = self._fanout_queues[queue]
self.active_fanout_queues.add(queue)
self._fanout_to_queue[exchange] = queue
ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
self._update_cycle()
return ret
def basic_cancel(self, consumer_tag):
try:
queue = self._tag_to_queue[consumer_tag]
except KeyError:
return
try:
self.active_fanout_queues.discard(queue)
self._fanout_to_queue.pop(self._fanout_queues[queue])
except KeyError:
pass
ret = super(Channel, self).basic_cancel(consumer_tag)
self._update_cycle()
return ret
def _subscribe(self):
keys = [self._fanout_queues[queue]
for queue in self.active_fanout_queues]
if not keys:
return
c = self.subclient
if c.connection._sock is None:
c.connection.connect()
self._in_listen = True
self.subclient.subscribe(keys)
def _handle_message(self, client, r):
if r[0] == 'unsubscribe' and r[2] == 0:
client.subscribed = False
elif r[0] == 'pmessage':
return {'type': r[0], 'pattern': r[1],
'channel': r[2], 'data': r[3]}
else:
return {'type': r[0], 'pattern': None,
'channel': r[1], 'data': r[2]}
def _receive(self):
c = self.subclient
response = None
try:
response = c.parse_response()
except self.connection_errors:
self._in_listen = False
if response is not None:
payload = self._handle_message(c, response)
if payload['type'] == 'message':
return (loads(payload['data']),
self._fanout_to_queue[payload['channel']])
raise Empty()
def _brpop_start(self, timeout=1):
queues = self._consume_cycle()
if not queues:
return
keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS
for queue in queues] + [timeout or 0]
self._in_poll = True
self.client.connection.send_command('BRPOP', *keys)
def _brpop_read(self, **options):
try:
try:
dest__item = self.client.parse_response(self.client.connection,
'BRPOP',
**options)
except self.connection_errors:
# if there's a ConnectionError, disconnect so the next
# iteration will reconnect automatically.
self.client.connection.disconnect()
raise Empty()
if dest__item:
dest, item = dest__item
dest = dest.rsplit(self.sep, 1)[0]
self._rotate_cycle(dest)
return loads(item), dest
else:
raise Empty()
finally:
self._in_poll = False
def _poll_error(self, type, **options):
try:
self.client.parse_response(type)
except self.connection_errors:
pass
def _get(self, queue):
with self.conn_or_acquire() as client:
for pri in PRIORITY_STEPS:
item = client.rpop(self._q_for_pri(queue, pri))
if item:
return loads(item)
raise Empty()
def _size(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.llen(self._q_for_pri(queue, pri))
sizes = cmds.execute()
return sum(size for size in sizes if isinstance(size, int))
def _q_for_pri(self, queue, pri):
pri = self.priority(pri)
return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', ''))
def priority(self, n):
steps = self.priority_steps
return steps[bisect(steps, n) - 1]
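    # Example (illustrative): with PRIORITY_STEPS = [0, 3, 6, 9],
    # priority(4) -> 3 and priority(9) -> 9, so a message published with
    # priority 4 lands on the Redis list named queue + '\x06\x16' + '3',
    # while priority 0 uses the bare queue name (see _q_for_pri below).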
def _put(self, queue, message, **kwargs):
"""Deliver message."""
try:
pri = max(min(int(
message['properties']['delivery_info']['priority']), 9), 0)
except (TypeError, ValueError, KeyError):
pri = 0
with self.conn_or_acquire() as client:
client.lpush(self._q_for_pri(queue, pri), dumps(message))
def _put_fanout(self, exchange, message, **kwargs):
"""Deliver fanout message."""
with self.conn_or_acquire() as client:
client.publish(exchange, dumps(message))
def _new_queue(self, queue, auto_delete=False, **kwargs):
if auto_delete:
self.auto_delete_queues.add(queue)
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
# Mark exchange as fanout.
self._fanout_queues[queue] = exchange
with self.conn_or_acquire() as client:
client.sadd(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
def _delete(self, queue, exchange, routing_key, pattern, *args):
self.auto_delete_queues.discard(queue)
with self.conn_or_acquire() as client:
client.srem(self.keyprefix_queue % (exchange, ),
self.sep.join([routing_key or '',
pattern or '',
queue or '']))
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.delete(self._q_for_pri(queue, pri))
cmds.execute()
def _has_queue(self, queue, **kwargs):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
cmds = cmds.exists(self._q_for_pri(queue, pri))
return any(cmds.execute())
def get_table(self, exchange):
key = self.keyprefix_queue % exchange
with self.conn_or_acquire() as client:
values = client.smembers(key)
if not values:
raise InconsistencyError(
'Queue list empty or key does not exist: %r' % (
self.keyprefix_queue % exchange))
return [tuple(val.split(self.sep)) for val in values]
def _purge(self, queue):
with self.conn_or_acquire() as client:
cmds = client.pipeline()
for pri in PRIORITY_STEPS:
priq = self._q_for_pri(queue, pri)
cmds = cmds.llen(priq).delete(priq)
sizes = cmds.execute()
return sum(sizes[::2])
def close(self):
if self._pool:
self._pool.disconnect()
if not self.closed:
# remove from channel poller.
self.connection.cycle.discard(self)
# delete fanout bindings
for queue in self._fanout_queues:
if queue in self.auto_delete_queues:
self.queue_delete(queue)
# Close connections
for attr in 'client', 'subclient':
try:
self.__dict__[attr].connection.disconnect()
except (KeyError, AttributeError, self.ResponseError):
pass
super(Channel, self).close()
def _connparams(self):
conninfo = self.connection.client
database = conninfo.virtual_host
if not isinstance(database, int):
if not database or database == '/':
database = DEFAULT_DB
elif database.startswith('/'):
database = database[1:]
try:
database = int(database)
except ValueError:
raise ValueError(
'Database name must be int between 0 and limit - 1')
return {'host': conninfo.hostname or '127.0.0.1',
'port': conninfo.port or DEFAULT_PORT,
'db': database,
'password': conninfo.password,
'max_connections': self.max_connections}
def _create_client(self):
return self.Client(connection_pool=self.pool)
def _get_pool(self):
return redis.ConnectionPool(**self._connparams())
def _get_client(self):
if redis.VERSION < (2, 4, 4):
raise VersionMismatch(
'Redis transport requires redis-py versions 2.4.4 or later. '
'You have %r' % (redis.__version__, ))
        # KombuRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        # This was added after redis-py was changed.
class KombuRedis(redis.Redis): # pragma: no cover
def __init__(self, *args, **kwargs):
super(KombuRedis, self).__init__(*args, **kwargs)
self.connection = self.connection_pool.get_connection('_')
def execute_command(self, *args, **options):
conn = self.connection
command_name = args[0]
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except redis.ConnectionError:
conn.disconnect()
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
return KombuRedis
@contextmanager
def conn_or_acquire(self, client=None):
if client:
yield client
else:
if self._in_poll:
client = self._create_client()
yield client
self.pool.release(client.connection)
else:
yield self.client
@property
def pool(self):
if self._pool is None:
self._pool = self._get_pool()
return self._pool
@cached_property
def client(self):
"""Client used to publish messages, BRPOP etc."""
return self._create_client()
@cached_property
def subclient(self):
"""Pub/Sub connection used to consume fanout queues."""
client = self._create_client()
pubsub = client.pubsub()
pool = pubsub.connection_pool
pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint)
return pubsub
def _update_cycle(self):
"""Update fair cycle between queues.
We cycle between queues fairly to make sure that
each queue is equally likely to be consumed from,
so that a very busy queue will not block others.
This works by using Redis's `BRPOP` command and
by rotating the most recently used queue to the
        end of the list. See Kombu github issue #166 for
more discussion of this method.
"""
self._queue_cycle = list(self.active_queues)
def _consume_cycle(self):
"""Get a fresh list of queues from the queue cycle."""
active = len(self.active_queues)
return self._queue_cycle[0:active]
def _rotate_cycle(self, used):
"""
Move most recently used queue to end of list
"""
index = self._queue_cycle.index(used)
self._queue_cycle.append(self._queue_cycle.pop(index))
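    # Example of the rotation (illustrative): with a cycle of ['a', 'b', 'c'],
    # consuming a message from 'a' calls _rotate_cycle('a'), so the next
    # BRPOP is issued over ['b', 'c', 'a'] and 'b' gets the first chance.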
def _get_response_error(self):
from redis import exceptions
return exceptions.ResponseError
@property
def active_queues(self):
"""Set of queues being consumed from (excluding fanout queues)."""
return set(queue for queue in self._active_queues
if queue not in self.active_fanout_queues)
class Transport(virtual.Transport):
Channel = Channel
polling_interval = None # disable sleep between unsuccessful polls.
default_port = DEFAULT_PORT
supports_ev = True
driver_type = 'redis'
driver_name = 'redis'
def __init__(self, *args, **kwargs):
super(Transport, self).__init__(*args, **kwargs)
# Get redis-py exceptions.
self.connection_errors, self.channel_errors = self._get_errors()
# All channels share the same poller.
self.cycle = MultiChannelPoller()
def driver_version(self):
return redis.__version__
def on_poll_init(self, poller):
"""Called when hub starts."""
self.cycle.on_poll_init(poller)
def on_poll_start(self):
"""Called by hub before each ``poll()``"""
cycle = self.cycle
cycle.on_poll_start()
return dict((fd, self.handle_event) for fd in cycle.fds)
def on_poll_empty(self):
self.cycle.on_poll_empty()
def handle_event(self, fileno, event):
"""Handle AIO event for one of our file descriptors."""
ret = self.cycle.handle_event(fileno, event)
if ret:
item, channel = ret
message, queue = item
if not queue or queue not in self._callbacks:
raise KeyError(
"Received message for queue '%s' without consumers: %s" % (
queue, message))
self._callbacks[queue](message)
def _get_errors(self):
"""Utility to import redis-py's exceptions at runtime."""
from redis import exceptions
# This exception suddenly changed name between redis-py versions
if hasattr(exceptions, 'InvalidData'):
DataError = exceptions.InvalidData
else:
DataError = exceptions.DataError
return ((StdConnectionError,
exceptions.ConnectionError,
exceptions.AuthenticationError),
(DataError,
exceptions.InvalidResponse,
exceptions.ResponseError,
StdChannelError))
|
|
import datetime
import json
import os
import sys
import freezegun
import pretend
import pytest
from mock import patch
from pip._vendor import pkg_resources
from pip._internal import self_outdated_check
from pip._internal.index import InstallationCandidate
from pip._internal.network.session import PipSession
from pip._internal.self_outdated_check import (
SelfCheckState,
logger,
make_link_collector,
pip_self_version_check,
)
from tests.lib.path import Path
@pytest.mark.parametrize(
'find_links, no_index, suppress_no_index, expected', [
(['link1'], False, False,
(['link1'], ['default_url', 'url1', 'url2'])),
(['link1'], False, True, (['link1'], ['default_url', 'url1', 'url2'])),
(['link1'], True, False, (['link1'], [])),
# Passing suppress_no_index=True suppresses no_index=True.
(['link1'], True, True, (['link1'], ['default_url', 'url1', 'url2'])),
# Test options.find_links=False.
(False, False, False, ([], ['default_url', 'url1', 'url2'])),
],
)
def test_make_link_collector(
find_links, no_index, suppress_no_index, expected,
):
"""
:param expected: the expected (find_links, index_urls) values.
"""
expected_find_links, expected_index_urls = expected
session = PipSession()
options = pretend.stub(
find_links=find_links,
index_url='default_url',
extra_index_urls=['url1', 'url2'],
no_index=no_index,
)
link_collector = make_link_collector(
session, options=options, suppress_no_index=suppress_no_index,
)
assert link_collector.session is session
search_scope = link_collector.search_scope
assert search_scope.find_links == expected_find_links
assert search_scope.index_urls == expected_index_urls
@patch('pip._internal.utils.misc.expanduser')
def test_make_link_collector__find_links_expansion(mock_expanduser, tmpdir):
"""
Test "~" expansion in --find-links paths.
"""
# This is a mock version of expanduser() that expands "~" to the tmpdir.
def expand_path(path):
if path.startswith('~/'):
path = os.path.join(tmpdir, path[2:])
return path
mock_expanduser.side_effect = expand_path
session = PipSession()
options = pretend.stub(
find_links=['~/temp1', '~/temp2'],
index_url='default_url',
extra_index_urls=[],
no_index=False,
)
# Only create temp2 and not temp1 to test that "~" expansion only occurs
# when the directory exists.
temp2_dir = os.path.join(tmpdir, 'temp2')
os.mkdir(temp2_dir)
link_collector = make_link_collector(session, options=options)
search_scope = link_collector.search_scope
# Only ~/temp2 gets expanded. Also, the path is normalized when expanded.
expected_temp2_dir = os.path.normcase(temp2_dir)
assert search_scope.find_links == ['~/temp1', expected_temp2_dir]
assert search_scope.index_urls == ['default_url']
class MockBestCandidateResult(object):
def __init__(self, best):
self.best_candidate = best
class MockPackageFinder(object):
BASE_URL = 'https://pypi.org/simple/pip-{0}.tar.gz'
PIP_PROJECT_NAME = 'pip'
INSTALLATION_CANDIDATES = [
InstallationCandidate(PIP_PROJECT_NAME, '6.9.0',
BASE_URL.format('6.9.0')),
InstallationCandidate(PIP_PROJECT_NAME, '3.3.1',
BASE_URL.format('3.3.1')),
InstallationCandidate(PIP_PROJECT_NAME, '1.0',
BASE_URL.format('1.0')),
]
@classmethod
def create(cls, *args, **kwargs):
return cls()
def find_best_candidate(self, project_name):
return MockBestCandidateResult(self.INSTALLATION_CANDIDATES[0])
class MockDistribution(object):
def __init__(self, installer):
self.installer = installer
def has_metadata(self, name):
return name == 'INSTALLER'
def get_metadata_lines(self, name):
if self.has_metadata(name):
yield self.installer
else:
raise NotImplementedError('nope')
def _options():
''' Some default options that we pass to
self_outdated_check.pip_self_version_check '''
return pretend.stub(
find_links=[], index_url='default_url', extra_index_urls=[],
no_index=False, pre=False, cache_dir='',
)
@pytest.mark.parametrize(
[
'stored_time',
'installed_ver',
'new_ver',
'installer',
'check_if_upgrade_required',
'check_warn_logs',
],
[
# Test we return None when installed version is None
('1970-01-01T10:00:00Z', None, '1.0', 'pip', False, False),
# Need an upgrade - upgrade warning should print
('1970-01-01T10:00:00Z', '1.0', '6.9.0', 'pip', True, True),
# Upgrade available, pip installed via rpm - warning should not print
('1970-01-01T10:00:00Z', '1.0', '6.9.0', 'rpm', True, False),
# No upgrade - upgrade warning should not print
('1970-01-9T10:00:00Z', '6.9.0', '6.9.0', 'pip', False, False),
]
)
def test_pip_self_version_check(monkeypatch, stored_time, installed_ver,
new_ver, installer,
check_if_upgrade_required, check_warn_logs):
monkeypatch.setattr(self_outdated_check, 'get_installed_version',
lambda name: installed_ver)
monkeypatch.setattr(self_outdated_check, 'PackageFinder',
MockPackageFinder)
monkeypatch.setattr(logger, 'warning',
pretend.call_recorder(lambda *a, **kw: None))
monkeypatch.setattr(logger, 'debug',
pretend.call_recorder(lambda s, exc_info=None: None))
monkeypatch.setattr(pkg_resources, 'get_distribution',
lambda name: MockDistribution(installer))
fake_state = pretend.stub(
state={"last_check": stored_time, 'pypi_version': installed_ver},
save=pretend.call_recorder(lambda v, t: None),
)
monkeypatch.setattr(
self_outdated_check, 'SelfCheckState', lambda **kw: fake_state
)
with freezegun.freeze_time(
"1970-01-09 10:00:00",
ignore=[
"six.moves",
"pip._vendor.six.moves",
"pip._vendor.requests.packages.urllib3.packages.six.moves",
]
):
latest_pypi_version = pip_self_version_check(None, _options())
# See we return None if not installed_version
if not installed_ver:
assert not latest_pypi_version
# See that we saved the correct version
elif check_if_upgrade_required:
assert fake_state.save.calls == [
pretend.call(new_ver, datetime.datetime(1970, 1, 9, 10, 00, 00)),
]
else:
# Make sure no Exceptions
assert not logger.debug.calls
# See that save was not called
assert fake_state.save.calls == []
# Ensure we warn the user or not
if check_warn_logs:
assert len(logger.warning.calls) == 1
else:
assert len(logger.warning.calls) == 0
statefile_name_case_1 = (
"fcd2d5175dd33d5df759ee7b045264230205ef837bf9f582f7c3ada7"
)
statefile_name_case_2 = (
"902cecc0745b8ecf2509ba473f3556f0ba222fedc6df433acda24aa5"
)
@pytest.mark.parametrize("key,expected", [
("/hello/world/venv", statefile_name_case_1),
("C:\\Users\\User\\Desktop\\venv", statefile_name_case_2),
])
def test_get_statefile_name_known_values(key, expected):
assert expected == self_outdated_check._get_statefile_name(key)
def _get_statefile_path(cache_dir, key):
return os.path.join(
cache_dir, "selfcheck", self_outdated_check._get_statefile_name(key)
)
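# For example (using the hash asserted above), _get_statefile_path(
# "/cache", "/hello/world/venv") is "/cache/selfcheck/" + statefile_name_case_1.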
def test_self_check_state_no_cache_dir():
state = SelfCheckState(cache_dir=False)
assert state.state == {}
assert state.statefile_path is None
def test_self_check_state_key_uses_sys_prefix(monkeypatch):
key = "helloworld"
monkeypatch.setattr(sys, "prefix", key)
state = self_outdated_check.SelfCheckState("")
assert state.key == key
def test_self_check_state_reads_expected_statefile(monkeypatch, tmpdir):
cache_dir = tmpdir / "cache_dir"
cache_dir.mkdir()
key = "helloworld"
statefile_path = _get_statefile_path(str(cache_dir), key)
last_check = "1970-01-02T11:00:00Z"
pypi_version = "1.0"
content = {
"key": key,
"last_check": last_check,
"pypi_version": pypi_version,
}
Path(statefile_path).parent.mkdir()
with open(statefile_path, "w") as f:
json.dump(content, f)
monkeypatch.setattr(sys, "prefix", key)
state = self_outdated_check.SelfCheckState(str(cache_dir))
assert state.state["last_check"] == last_check
assert state.state["pypi_version"] == pypi_version
def test_self_check_state_writes_expected_statefile(monkeypatch, tmpdir):
cache_dir = tmpdir / "cache_dir"
cache_dir.mkdir()
key = "helloworld"
statefile_path = _get_statefile_path(str(cache_dir), key)
last_check = datetime.datetime.strptime(
"1970-01-02T11:00:00Z", self_outdated_check.SELFCHECK_DATE_FMT
)
pypi_version = "1.0"
monkeypatch.setattr(sys, "prefix", key)
state = self_outdated_check.SelfCheckState(str(cache_dir))
state.save(pypi_version, last_check)
with open(statefile_path) as f:
saved = json.load(f)
expected = {
"key": key,
"last_check": last_check.strftime(
self_outdated_check.SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
assert expected == saved
|
|
# kae.py
#
# Generated on Sun Dec 30 17:39:53 +0000 2007
# AEDataModel.h
typeBoolean = b'bool'
typeChar = b'TEXT'
typeStyledUnicodeText = b'sutx'
typeEncodedString = b'encs'
typeUnicodeText = b'utxt'
typeCString = b'cstr'
typePString = b'pstr'
typeUTF16ExternalRepresentation = b'ut16'
typeUTF8Text = b'utf8'
typeSInt16 = b'shor'
typeUInt16 = b'ushr'
typeSInt32 = b'long'
typeUInt32 = b'magn'
typeSInt64 = b'comp'
typeUInt64 = b'ucom'
typeIEEE32BitFloatingPoint = b'sing'
typeIEEE64BitFloatingPoint = b'doub'
type128BitFloatingPoint = b'ldbl'
typeDecimalStruct = b'decm'
typeSMInt = typeSInt16
typeShortInteger = typeSInt16
typeInteger = typeSInt32
typeLongInteger = typeSInt32
typeMagnitude = typeUInt32
typeComp = typeSInt64
typeSMFloat = typeIEEE32BitFloatingPoint
typeShortFloat = typeIEEE32BitFloatingPoint
typeFloat = typeIEEE64BitFloatingPoint
typeLongFloat = typeIEEE64BitFloatingPoint
typeExtended = b'exte'
typeAEList = b'list'
typeAERecord = b'reco'
typeAppleEvent = b'aevt'
typeEventRecord = b'evrc'
typeTrue = b'true'
typeFalse = b'fals'
typeAlias = b'alis'
typeEnumerated = b'enum'
typeType = b'type'
typeAppParameters = b'appa'
typeProperty = b'prop'
typeFSRef = b'fsrf'
typeFileURL = b'furl'
typeKeyword = b'keyw'
typeSectionH = b'sect'
typeWildCard = b'****'
typeApplSignature = b'sign'
typeQDRectangle = b'qdrt'
typeFixed = b'fixd'
typeProcessSerialNumber = b'psn '
typeApplicationURL = b'aprl'
typeNull = b'null'
typeFSS = b'fss '
typeCFAttributedStringRef = b'cfas'
typeCFMutableAttributedStringRef = b'cfaa'
typeCFStringRef = b'cfst'
typeCFMutableStringRef = b'cfms'
typeCFArrayRef = b'cfar'
typeCFMutableArrayRef = b'cfma'
typeCFDictionaryRef = b'cfdc'
typeCFMutableDictionaryRef = b'cfmd'
typeCFNumberRef = b'cfnb'
typeCFBooleanRef = b'cftf'
typeCFTypeRef = b'cfty'
typeKernelProcessID = b'kpid'
typeMachPort = b'port'
typeApplicationBundleID = b'bund'
keyTransactionIDAttr = b'tran'
keyReturnIDAttr = b'rtid'
keyEventClassAttr = b'evcl'
keyEventIDAttr = b'evid'
keyAddressAttr = b'addr'
keyOptionalKeywordAttr = b'optk'
keyTimeoutAttr = b'timo'
keyInteractLevelAttr = b'inte'
keyEventSourceAttr = b'esrc'
keyMissedKeywordAttr = b'miss'
keyOriginalAddressAttr = b'from'
keyAcceptTimeoutAttr = b'actm'
keyReplyRequestedAttr = b'repq'
kAEDebugPOSTHeader = (1 << 0)
kAEDebugReplyHeader = (1 << 1)
kAEDebugXMLRequest = (1 << 2)
kAEDebugXMLResponse = (1 << 3)
kAEDebugXMLDebugAll = 0xFFFFFFFF
kSOAP1999Schema = b'ss99'
kSOAP2001Schema = b'ss01'
keyUserNameAttr = b'unam'
keyUserPasswordAttr = b'pass'
keyDisableAuthenticationAttr = b'auth'
keyXMLDebuggingAttr = b'xdbg'
kAERPCClass = b'rpc '
kAEXMLRPCScheme = b'RPC2'
kAESOAPScheme = b'SOAP'
kAESharedScriptHandler = b'wscp'
keyRPCMethodName = b'meth'
keyRPCMethodParam = b'parm'
keyRPCMethodParamOrder = b'/ord'
keyAEPOSTHeaderData = b'phed'
keyAEReplyHeaderData = b'rhed'
keyAEXMLRequestData = b'xreq'
keyAEXMLReplyData = b'xrep'
keyAdditionalHTTPHeaders = b'ahed'
keySOAPAction = b'sact'
keySOAPMethodNameSpace = b'mspc'
keySOAPMethodNameSpaceURI = b'mspu'
keySOAPSchemaVersion = b'ssch'
keySOAPStructureMetaData = b'/smd'
keySOAPSMDNamespace = b'ssns'
keySOAPSMDNamespaceURI = b'ssnu'
keySOAPSMDType = b'sstp'
kAEUseHTTPProxyAttr = b'xupr'
kAEHTTPProxyPortAttr = b'xhtp'
kAEHTTPProxyHostAttr = b'xhth'
kAESocks4Protocol = 4
kAESocks5Protocol = 5
kAEUseSocksAttr = b'xscs'
kAESocksProxyAttr = b'xsok'
kAESocksHostAttr = b'xshs'
kAESocksPortAttr = b'xshp'
kAESocksUserAttr = b'xshu'
kAESocksPasswordAttr = b'xshw'
kAEDescListFactorNone = 0
kAEDescListFactorType = 4
kAEDescListFactorTypeAndSize = 8
kAutoGenerateReturnID = -1
kAnyTransactionID = 0
kAEDataArray = 0
kAEPackedArray = 1
kAEDescArray = 3
kAEKeyDescArray = 4
kAEHandleArray = 2
kAENormalPriority = 0x00000000
kAEHighPriority = 0x00000001
kAENoReply = 0x00000001
kAEQueueReply = 0x00000002
kAEWaitReply = 0x00000003
kAEDontReconnect = 0x00000080
kAEWantReceipt = 0x00000200
kAENeverInteract = 0x00000010
kAECanInteract = 0x00000020
kAEAlwaysInteract = 0x00000030
kAECanSwitchLayer = 0x00000040
kAEDontRecord = 0x00001000
kAEDontExecute = 0x00002000
kAEProcessNonReplyEvents = 0x00008000
kAEDefaultTimeout = -1
kNoTimeOut = -2
# AEHelpers.h
aeBuildSyntaxNoErr = 0
aeBuildSyntaxBadToken = 1
aeBuildSyntaxBadEOF = 2
aeBuildSyntaxNoEOF = 3
aeBuildSyntaxBadNegative = 4
aeBuildSyntaxMissingQuote = 5
aeBuildSyntaxBadHex = 6
aeBuildSyntaxOddHex = 7
aeBuildSyntaxNoCloseHex = 8
aeBuildSyntaxUncoercedHex = 9
aeBuildSyntaxNoCloseString = 10
aeBuildSyntaxBadDesc = 11
aeBuildSyntaxBadData = 12
aeBuildSyntaxNoCloseParen = 13
aeBuildSyntaxNoCloseBracket = 14
aeBuildSyntaxNoCloseBrace = 15
aeBuildSyntaxNoKey = 16
aeBuildSyntaxNoColon = 17
aeBuildSyntaxCoercedList = 18
aeBuildSyntaxUncoercedDoubleAt = 19
# AEMach.h
keyReplyPortAttr = b'repp'
typeReplyPortAttr = keyReplyPortAttr
# AEObjects.h
kAEAND = b'AND '
kAEOR = b'OR '
kAENOT = b'NOT '
kAEFirst = b'firs'
kAELast = b'last'
kAEMiddle = b'midd'
kAEAny = b'any '
kAEAll = b'all '
kAENext = b'next'
kAEPrevious = b'prev'
keyAECompOperator = b'relo'
keyAELogicalTerms = b'term'
keyAELogicalOperator = b'logc'
keyAEObject1 = b'obj1'
keyAEObject2 = b'obj2'
keyAEDesiredClass = b'want'
keyAEContainer = b'from'
keyAEKeyForm = b'form'
keyAEKeyData = b'seld'
keyAERangeStart = b'star'
keyAERangeStop = b'stop'
keyDisposeTokenProc = b'xtok'
keyAECompareProc = b'cmpr'
keyAECountProc = b'cont'
keyAEMarkTokenProc = b'mkid'
keyAEMarkProc = b'mark'
keyAEAdjustMarksProc = b'adjm'
keyAEGetErrDescProc = b'indc'
formAbsolutePosition = b'indx'
formRelativePosition = b'rele'
formTest = b'test'
formRange = b'rang'
formPropertyID = b'prop'
formName = b'name'
formUniqueID = b'ID '
typeObjectSpecifier = b'obj '
typeObjectBeingExamined = b'exmn'
typeCurrentContainer = b'ccnt'
typeToken = b'toke'
typeRelativeDescriptor = b'rel '
typeAbsoluteOrdinal = b'abso'
typeIndexDescriptor = b'inde'
typeRangeDescriptor = b'rang'
typeLogicalDescriptor = b'logi'
typeCompDescriptor = b'cmpd'
typeOSLTokenList = b'ostl'
kAEIDoMinimum = 0x0000
kAEIDoWhose = 0x0001
kAEIDoMarking = 0x0004
kAEPassSubDescs = 0x0008
kAEResolveNestedLists = 0x0010
kAEHandleSimpleRanges = 0x0020
kAEUseRelativeIterators = 0x0040
typeWhoseDescriptor = b'whos'
formWhose = b'whos'
typeWhoseRange = b'wrng'
keyAEWhoseRangeStart = b'wstr'
keyAEWhoseRangeStop = b'wstp'
keyAEIndex = b'kidx'
keyAETest = b'ktst'
# AEPackObject.h
# AERegistry.h
cAEList = b'list'
cApplication = b'capp'
cArc = b'carc'
cBoolean = b'bool'
cCell = b'ccel'
cChar = b'cha '
cColorTable = b'clrt'
cColumn = b'ccol'
cDocument = b'docu'
cDrawingArea = b'cdrw'
cEnumeration = b'enum'
cFile = b'file'
cFixed = b'fixd'
cFixedPoint = b'fpnt'
cFixedRectangle = b'frct'
cGraphicLine = b'glin'
cGraphicObject = b'cgob'
cGraphicShape = b'cgsh'
cGraphicText = b'cgtx'
cGroupedGraphic = b'cpic'
cInsertionLoc = b'insl'
cInsertionPoint = b'cins'
cIntlText = b'itxt'
cIntlWritingCode = b'intl'
cItem = b'citm'
cLine = b'clin'
cLongDateTime = b'ldt '
cLongFixed = b'lfxd'
cLongFixedPoint = b'lfpt'
cLongFixedRectangle = b'lfrc'
cLongInteger = b'long'
cLongPoint = b'lpnt'
cLongRectangle = b'lrct'
cMachineLoc = b'mLoc'
cMenu = b'cmnu'
cMenuItem = b'cmen'
cObject = b'cobj'
cObjectSpecifier = b'obj '
cOpenableObject = b'coob'
cOval = b'covl'
cParagraph = b'cpar'
cPICT = b'PICT'
cPixel = b'cpxl'
cPixelMap = b'cpix'
cPolygon = b'cpgn'
cProperty = b'prop'
cQDPoint = b'QDpt'
cQDRectangle = b'qdrt'
cRectangle = b'crec'
cRGBColor = b'cRGB'
cRotation = b'trot'
cRoundedRectangle = b'crrc'
cRow = b'crow'
cSelection = b'csel'
cShortInteger = b'shor'
cTable = b'ctbl'
cText = b'ctxt'
cTextFlow = b'cflo'
cTextStyles = b'tsty'
cType = b'type'
cVersion = b'vers'
cWindow = b'cwin'
cWord = b'cwor'
enumArrows = b'arro'
enumJustification = b'just'
enumKeyForm = b'kfrm'
enumPosition = b'posi'
enumProtection = b'prtn'
enumQuality = b'qual'
enumSaveOptions = b'savo'
enumStyle = b'styl'
enumTransferMode = b'tran'
kAEAbout = b'abou'
kAEAfter = b'afte'
kAEAliasSelection = b'sali'
kAEAllCaps = b'alcp'
kAEArrowAtEnd = b'aren'
kAEArrowAtStart = b'arst'
kAEArrowBothEnds = b'arbo'
kAEAsk = b'ask '
kAEBefore = b'befo'
kAEBeginning = b'bgng'
kAEBeginsWith = b'bgwt'
kAEBeginTransaction = b'begi'
kAEBold = b'bold'
kAECaseSensEquals = b'cseq'
kAECentered = b'cent'
kAEChangeView = b'view'
kAEClone = b'clon'
kAEClose = b'clos'
kAECondensed = b'cond'
kAEContains = b'cont'
kAECopy = b'copy'
kAECoreSuite = b'core'
kAECountElements = b'cnte'
kAECreateElement = b'crel'
kAECreatePublisher = b'cpub'
kAECut = b'cut '
kAEDelete = b'delo'
kAEDoObjectsExist = b'doex'
kAEDoScript = b'dosc'
kAEDrag = b'drag'
kAEDuplicateSelection = b'sdup'
kAEEditGraphic = b'edit'
kAEEmptyTrash = b'empt'
kAEEnd = b'end '
kAEEndsWith = b'ends'
kAEEndTransaction = b'endt'
kAEEquals = b'= '
kAEExpanded = b'pexp'
kAEFast = b'fast'
kAEFinderEvents = b'FNDR'
kAEFormulaProtect = b'fpro'
kAEFullyJustified = b'full'
kAEGetClassInfo = b'qobj'
kAEGetData = b'getd'
kAEGetDataSize = b'dsiz'
kAEGetEventInfo = b'gtei'
kAEGetInfoSelection = b'sinf'
kAEGetPrivilegeSelection = b'sprv'
kAEGetSuiteInfo = b'gtsi'
kAEGreaterThan = b'> '
kAEGreaterThanEquals = b'>= '
kAEGrow = b'grow'
kAEHidden = b'hidn'
kAEHiQuality = b'hiqu'
kAEImageGraphic = b'imgr'
kAEIsUniform = b'isun'
kAEItalic = b'ital'
kAELeftJustified = b'left'
kAELessThan = b'< '
kAELessThanEquals = b'<= '
kAELowercase = b'lowc'
kAEMakeObjectsVisible = b'mvis'
kAEMiscStandards = b'misc'
kAEModifiable = b'modf'
kAEMove = b'move'
kAENo = b'no '
kAENoArrow = b'arno'
kAENonmodifiable = b'nmod'
kAEOpen = b'odoc'
kAEOpenSelection = b'sope'
kAEOutline = b'outl'
kAEPageSetup = b'pgsu'
kAEPaste = b'past'
kAEPlain = b'plan'
kAEPrint = b'pdoc'
kAEPrintSelection = b'spri'
kAEPrintWindow = b'pwin'
kAEPutAwaySelection = b'sput'
kAEQDAddOver = b'addo'
kAEQDAddPin = b'addp'
kAEQDAdMax = b'admx'
kAEQDAdMin = b'admn'
kAEQDBic = b'bic '
kAEQDBlend = b'blnd'
kAEQDCopy = b'cpy '
kAEQDNotBic = b'nbic'
kAEQDNotCopy = b'ncpy'
kAEQDNotOr = b'ntor'
kAEQDNotXor = b'nxor'
kAEQDOr = b'or '
kAEQDSubOver = b'subo'
kAEQDSubPin = b'subp'
kAEQDSupplementalSuite = b'qdsp'
kAEQDXor = b'xor '
kAEQuickdrawSuite = b'qdrw'
kAEQuitAll = b'quia'
kAERedo = b'redo'
kAERegular = b'regl'
kAEReopenApplication = b'rapp'
kAEReplace = b'rplc'
kAERequiredSuite = b'reqd'
kAERestart = b'rest'
kAERevealSelection = b'srev'
kAERevert = b'rvrt'
kAERightJustified = b'rght'
kAESave = b'save'
kAESelect = b'slct'
kAESetData = b'setd'
kAESetPosition = b'posn'
kAEShadow = b'shad'
kAEShowClipboard = b'shcl'
kAEShutDown = b'shut'
kAESleep = b'slep'
kAESmallCaps = b'smcp'
kAESpecialClassProperties = b'c@#!'
kAEStrikethrough = b'strk'
kAESubscript = b'sbsc'
kAESuperscript = b'spsc'
kAETableSuite = b'tbls'
kAETextSuite = b'TEXT'
kAETransactionTerminated = b'ttrm'
kAEUnderline = b'undl'
kAEUndo = b'undo'
kAEWholeWordEquals = b'wweq'
kAEYes = b'yes '
kAEZoom = b'zoom'
kAELogOut = b'logo'
kAEReallyLogOut = b'rlgo'
kAEShowRestartDialog = b'rrst'
kAEShowShutdownDialog = b'rsdn'
kAEMouseClass = b'mous'
kAEDown = b'down'
kAEUp = b'up '
kAEMoved = b'move'
kAEStoppedMoving = b'stop'
kAEWindowClass = b'wind'
kAEUpdate = b'updt'
kAEActivate = b'actv'
kAEDeactivate = b'dact'
kAECommandClass = b'cmnd'
kAEKeyClass = b'keyc'
kAERawKey = b'rkey'
kAEVirtualKey = b'keyc'
kAENavigationKey = b'nave'
kAEAutoDown = b'auto'
kAEApplicationClass = b'appl'
kAESuspend = b'susp'
kAEResume = b'rsme'
kAEDiskEvent = b'disk'
kAENullEvent = b'null'
kAEWakeUpEvent = b'wake'
kAEScrapEvent = b'scrp'
kAEHighLevel = b'high'
keyAEAngle = b'kang'
keyAEArcAngle = b'parc'
keyAEBaseAddr = b'badd'
keyAEBestType = b'pbst'
keyAEBgndColor = b'kbcl'
keyAEBgndPattern = b'kbpt'
keyAEBounds = b'pbnd'
keyAECellList = b'kclt'
keyAEClassID = b'clID'
keyAEColor = b'colr'
keyAEColorTable = b'cltb'
keyAECurveHeight = b'kchd'
keyAECurveWidth = b'kcwd'
keyAEDashStyle = b'pdst'
keyAEData = b'data'
keyAEDefaultType = b'deft'
keyAEDefinitionRect = b'pdrt'
keyAEDescType = b'dstp'
keyAEDestination = b'dest'
keyAEDoAntiAlias = b'anta'
keyAEDoDithered = b'gdit'
keyAEDoRotate = b'kdrt'
keyAEDoScale = b'ksca'
keyAEDoTranslate = b'ktra'
keyAEEditionFileLoc = b'eloc'
keyAEElements = b'elms'
keyAEEndPoint = b'pend'
keyAEEventClass = b'evcl'
keyAEEventID = b'evti'
keyAEFile = b'kfil'
keyAEFileType = b'fltp'
keyAEFillColor = b'flcl'
keyAEFillPattern = b'flpt'
keyAEFlipHorizontal = b'kfho'
keyAEFlipVertical = b'kfvt'
keyAEFont = b'font'
keyAEFormula = b'pfor'
keyAEGraphicObjects = b'gobs'
keyAEID = b'ID '
keyAEImageQuality = b'gqua'
keyAEInsertHere = b'insh'
keyAEKeyForms = b'keyf'
keyAEKeyword = b'kywd'
keyAELevel = b'levl'
keyAELineArrow = b'arro'
keyAEName = b'pnam'
keyAENewElementLoc = b'pnel'
keyAEObject = b'kobj'
keyAEObjectClass = b'kocl'
keyAEOffStyles = b'ofst'
keyAEOnStyles = b'onst'
keyAEParameters = b'prms'
keyAEParamFlags = b'pmfg'
keyAEPenColor = b'ppcl'
keyAEPenPattern = b'pppa'
keyAEPenWidth = b'ppwd'
keyAEPixelDepth = b'pdpt'
keyAEPixMapMinus = b'kpmm'
keyAEPMTable = b'kpmt'
keyAEPointList = b'ptlt'
keyAEPointSize = b'ptsz'
keyAEPosition = b'kpos'
keyAEPropData = b'prdt'
keyAEProperties = b'qpro'
keyAEProperty = b'kprp'
keyAEPropFlags = b'prfg'
keyAEPropID = b'prop'
keyAEProtection = b'ppro'
keyAERenderAs = b'kren'
keyAERequestedType = b'rtyp'
keyAEResult = b'----'
keyAEResultInfo = b'rsin'
keyAERotation = b'prot'
keyAERotPoint = b'krtp'
keyAERowList = b'krls'
keyAESaveOptions = b'savo'
keyAEScale = b'pscl'
keyAEScriptTag = b'psct'
keyAESearchText = b'stxt'
keyAEShowWhere = b'show'
keyAEStartAngle = b'pang'
keyAEStartPoint = b'pstp'
keyAEStyles = b'ksty'
keyAESuiteID = b'suit'
keyAEText = b'ktxt'
keyAETextColor = b'ptxc'
keyAETextFont = b'ptxf'
keyAETextPointSize = b'ptps'
keyAETextStyles = b'txst'
keyAETextLineHeight = b'ktlh'
keyAETextLineAscent = b'ktas'
keyAETheText = b'thtx'
keyAETransferMode = b'pptm'
keyAETranslation = b'ptrs'
keyAETryAsStructGraf = b'toog'
keyAEUniformStyles = b'ustl'
keyAEUpdateOn = b'pupd'
keyAEUserTerm = b'utrm'
keyAEWindow = b'wndw'
keyAEWritingCode = b'wrcd'
keyMiscellaneous = b'fmsc'
keySelection = b'fsel'
keyWindow = b'kwnd'
keyWhen = b'when'
keyWhere = b'wher'
keyModifiers = b'mods'
keyKey = b'key '
keyKeyCode = b'code'
keyKeyboard = b'keyb'
keyDriveNumber = b'drv#'
keyErrorCode = b'err#'
keyHighLevelClass = b'hcls'
keyHighLevelID = b'hid '
pArcAngle = b'parc'
pBackgroundColor = b'pbcl'
pBackgroundPattern = b'pbpt'
pBestType = b'pbst'
pBounds = b'pbnd'
pClass = b'pcls'
pClipboard = b'pcli'
pColor = b'colr'
pColorTable = b'cltb'
pContents = b'pcnt'
pCornerCurveHeight = b'pchd'
pCornerCurveWidth = b'pcwd'
pDashStyle = b'pdst'
pDefaultType = b'deft'
pDefinitionRect = b'pdrt'
pEnabled = b'enbl'
pEndPoint = b'pend'
pFillColor = b'flcl'
pFillPattern = b'flpt'
pFont = b'font'
pFormula = b'pfor'
pGraphicObjects = b'gobs'
pHasCloseBox = b'hclb'
pHasTitleBar = b'ptit'
pID = b'ID '
pIndex = b'pidx'
pInsertionLoc = b'pins'
pIsFloating = b'isfl'
pIsFrontProcess = b'pisf'
pIsModal = b'pmod'
pIsModified = b'imod'
pIsResizable = b'prsz'
pIsStationeryPad = b'pspd'
pIsZoomable = b'iszm'
pIsZoomed = b'pzum'
pItemNumber = b'itmn'
pJustification = b'pjst'
pLineArrow = b'arro'
pMenuID = b'mnid'
pName = b'pnam'
pNewElementLoc = b'pnel'
pPenColor = b'ppcl'
pPenPattern = b'pppa'
pPenWidth = b'ppwd'
pPixelDepth = b'pdpt'
pPointList = b'ptlt'
pPointSize = b'ptsz'
pProtection = b'ppro'
pRotation = b'prot'
pScale = b'pscl'
pScript = b'scpt'
pScriptTag = b'psct'
pSelected = b'selc'
pSelection = b'sele'
pStartAngle = b'pang'
pStartPoint = b'pstp'
pTextColor = b'ptxc'
pTextFont = b'ptxf'
pTextItemDelimiters = b'txdl'
pTextPointSize = b'ptps'
pTextStyles = b'txst'
pTransferMode = b'pptm'
pTranslation = b'ptrs'
pUniformStyles = b'ustl'
pUpdateOn = b'pupd'
pUserSelection = b'pusl'
pVersion = b'vers'
pVisible = b'pvis'
typeAEText = b'tTXT'
typeArc = b'carc'
typeBest = b'best'
typeCell = b'ccel'
typeClassInfo = b'gcli'
typeColorTable = b'clrt'
typeColumn = b'ccol'
typeDashStyle = b'tdas'
typeData = b'tdta'
typeDrawingArea = b'cdrw'
typeElemInfo = b'elin'
typeEnumeration = b'enum'
typeEPS = b'EPS '
typeEventInfo = b'evin'
typeFinderWindow = b'fwin'
typeFixedPoint = b'fpnt'
typeFixedRectangle = b'frct'
typeGraphicLine = b'glin'
typeGraphicText = b'cgtx'
typeGroupedGraphic = b'cpic'
typeInsertionLoc = b'insl'
typeIntlText = b'itxt'
typeIntlWritingCode = b'intl'
typeLongDateTime = b'ldt '
typeCFAbsoluteTime = b'cfat'
typeISO8601DateTime = b'isot'
typeLongFixed = b'lfxd'
typeLongFixedPoint = b'lfpt'
typeLongFixedRectangle = b'lfrc'
typeLongPoint = b'lpnt'
typeLongRectangle = b'lrct'
typeMachineLoc = b'mLoc'
typeOval = b'covl'
typeParamInfo = b'pmin'
typePict = b'PICT'
typePixelMap = b'cpix'
typePixMapMinus = b'tpmm'
typePolygon = b'cpgn'
typePropInfo = b'pinf'
typePtr = b'ptr '
typeQDPoint = b'QDpt'
typeQDRegion = b'Qrgn'
typeRectangle = b'crec'
typeRGB16 = b'tr16'
typeRGB96 = b'tr96'
typeRGBColor = b'cRGB'
typeRotation = b'trot'
typeRoundedRectangle = b'crrc'
typeRow = b'crow'
typeScrapStyles = b'styl'
typeScript = b'scpt'
typeStyledText = b'STXT'
typeSuiteInfo = b'suin'
typeTable = b'ctbl'
typeTextStyles = b'tsty'
typeTIFF = b'TIFF'
typeJPEG = b'JPEG'
typeGIF = b'GIFf'
typeVersion = b'vers'
kAEMenuClass = b'menu'
kAEMenuSelect = b'mhit'
kAEMouseDown = b'mdwn'
kAEMouseDownInBack = b'mdbk'
kAEKeyDown = b'kdwn'
kAEResized = b'rsiz'
kAEPromise = b'prom'
keyMenuID = b'mid '
keyMenuItem = b'mitm'
keyCloseAllWindows = b'caw '
keyOriginalBounds = b'obnd'
keyNewBounds = b'nbnd'
keyLocalWhere = b'lwhr'
typeHIMenu = b'mobj'
typeHIWindow = b'wobj'
kBySmallIcon = 0
kByIconView = 1
kByNameView = 2
kByDateView = 3
kBySizeView = 4
kByKindView = 5
kByCommentView = 6
kByLabelView = 7
kByVersionView = 8
kAEInfo = 11
kAEMain = 0
kAESharing = 13
kAEZoomIn = 7
kAEZoomOut = 8
kTextServiceClass = b'tsvc'
kUpdateActiveInputArea = b'updt'
kShowHideInputWindow = b'shiw'
kPos2Offset = b'p2st'
kOffset2Pos = b'st2p'
kUnicodeNotFromInputMethod = b'unim'
kGetSelectedText = b'gtxt'
keyAETSMDocumentRefcon = b'refc'
keyAEServerInstance = b'srvi'
keyAETheData = b'kdat'
keyAEFixLength = b'fixl'
keyAEUpdateRange = b'udng'
keyAECurrentPoint = b'cpos'
keyAEBufferSize = b'buff'
keyAEMoveView = b'mvvw'
keyAENextBody = b'nxbd'
keyAETSMScriptTag = b'sclg'
keyAETSMTextFont = b'ktxf'
keyAETSMTextFMFont = b'ktxm'
keyAETSMTextPointSize = b'ktps'
keyAETSMEventRecord = b'tevt'
keyAETSMEventRef = b'tevr'
keyAETextServiceEncoding = b'tsen'
keyAETextServiceMacEncoding = b'tmen'
keyAETSMGlyphInfoArray = b'tgia'
typeTextRange = b'txrn'
typeComponentInstance = b'cmpi'
typeOffsetArray = b'ofay'
typeTextRangeArray = b'tray'
typeLowLevelEventRecord = b'evtr'
typeGlyphInfoArray = b'glia'
typeEventRef = b'evrf'
typeText = typeChar
kTSMOutsideOfBody = 1
kTSMInsideOfBody = 2
kTSMInsideOfActiveInputArea = 3
kNextBody = 1
kPreviousBody = 2
kTSMHiliteCaretPosition = 1
kTSMHiliteRawText = 2
kTSMHiliteSelectedRawText = 3
kTSMHiliteConvertedText = 4
kTSMHiliteSelectedConvertedText = 5
kTSMHiliteBlockFillText = 6
kTSMHiliteOutlineText = 7
kTSMHiliteSelectedText = 8
kTSMHiliteNoHilite = 9
kCaretPosition = kTSMHiliteCaretPosition
kRawText = kTSMHiliteRawText
kSelectedRawText = kTSMHiliteSelectedRawText
kConvertedText = kTSMHiliteConvertedText
kSelectedConvertedText = kTSMHiliteSelectedConvertedText
kBlockFillText = kTSMHiliteBlockFillText
kOutlineText = kTSMHiliteOutlineText
kSelectedText = kTSMHiliteSelectedText
keyAEHiliteRange = b'hrng'
keyAEPinRange = b'pnrg'
keyAEClauseOffsets = b'clau'
keyAEOffset = b'ofst'
keyAEPoint = b'gpos'
keyAELeftSide = b'klef'
keyAERegionClass = b'rgnc'
keyAEDragging = b'bool'
keyAELeadingEdge = keyAELeftSide
typeMeters = b'metr'
typeInches = b'inch'
typeFeet = b'feet'
typeYards = b'yard'
typeMiles = b'mile'
typeKilometers = b'kmtr'
typeCentimeters = b'cmtr'
typeSquareMeters = b'sqrm'
typeSquareFeet = b'sqft'
typeSquareYards = b'sqyd'
typeSquareMiles = b'sqmi'
typeSquareKilometers = b'sqkm'
typeLiters = b'litr'
typeQuarts = b'qrts'
typeGallons = b'galn'
typeCubicMeters = b'cmet'
typeCubicFeet = b'cfet'
typeCubicInches = b'cuin'
typeCubicCentimeter = b'ccmt'
typeCubicYards = b'cyrd'
typeKilograms = b'kgrm'
typeGrams = b'gram'
typeOunces = b'ozs '
typePounds = b'lbs '
typeDegreesC = b'degc'
typeDegreesF = b'degf'
typeDegreesK = b'degk'
kFAServerApp = b'ssrv'
kDoFolderActionEvent = b'fola'
kFolderActionCode = b'actn'
kFolderOpenedEvent = b'fopn'
kFolderClosedEvent = b'fclo'
kFolderWindowMovedEvent = b'fsiz'
kFolderItemsAddedEvent = b'fget'
kFolderItemsRemovedEvent = b'flos'
kItemList = b'flst'
kNewSizeParameter = b'fnsz'
kFASuiteCode = b'faco'
kFAAttachCommand = b'atfa'
kFARemoveCommand = b'rmfa'
kFAEditCommand = b'edfa'
kFAFileParam = b'faal'
kFAIndexParam = b'indx'
kAEInternetSuite = b'gurl'
kAEISWebStarSuite = b'WWW\xBD'
kAEISGetURL = b'gurl'
KAEISHandleCGI = b'sdoc'
cURL = b'url '
cInternetAddress = b'IPAD'
cHTML = b'html'
cFTPItem = b'ftp '
kAEISHTTPSearchArgs = b'kfor'
kAEISPostArgs = b'post'
kAEISMethod = b'meth'
kAEISClientAddress = b'addr'
kAEISUserName = b'user'
kAEISPassword = b'pass'
kAEISFromUser = b'frmu'
kAEISServerName = b'svnm'
kAEISServerPort = b'svpt'
kAEISScriptName = b'scnm'
kAEISContentType = b'ctyp'
kAEISReferrer = b'refr'
kAEISUserAgent = b'Agnt'
kAEISAction = b'Kact'
kAEISActionPath = b'Kapt'
kAEISClientIP = b'Kcip'
kAEISFullRequest = b'Kfrq'
pScheme = b'pusc'
pHost = b'HOST'
pPath = b'FTPc'
pUserName = b'RAun'
pUserPassword = b'RApw'
pDNSForm = b'pDNS'
pURL = b'pURL'
pTextEncoding = b'ptxe'
pFTPKind = b'kind'
eScheme = b'esch'
eurlHTTP = b'http'
eurlHTTPS = b'htps'
eurlFTP = b'ftp '
eurlMail = b'mail'
eurlFile = b'file'
eurlGopher = b'gphr'
eurlTelnet = b'tlnt'
eurlNews = b'news'
eurlSNews = b'snws'
eurlNNTP = b'nntp'
eurlMessage = b'mess'
eurlMailbox = b'mbox'
eurlMulti = b'mult'
eurlLaunch = b'laun'
eurlAFP = b'afp '
eurlAT = b'at '
eurlEPPC = b'eppc'
eurlRTSP = b'rtsp'
eurlIMAP = b'imap'
eurlNFS = b'unfs'
eurlPOP = b'upop'
eurlLDAP = b'uldp'
eurlUnknown = b'url?'
kConnSuite = b'macc'
cDevSpec = b'cdev'
cAddressSpec = b'cadr'
cADBAddress = b'cadb'
cAppleTalkAddress = b'cat '
cBusAddress = b'cbus'
cEthernetAddress = b'cen '
cFireWireAddress = b'cfw '
cIPAddress = b'cip '
cLocalTalkAddress = b'clt '
cSCSIAddress = b'cscs'
cTokenRingAddress = b'ctok'
cUSBAddress = b'cusb'
pDeviceType = b'pdvt'
pDeviceAddress = b'pdva'
pConduit = b'pcon'
pProtocol = b'pprt'
pATMachine = b'patm'
pATZone = b'patz'
pATType = b'patt'
pDottedDecimal = b'pipd'
pDNS = b'pdns'
pPort = b'ppor'
pNetwork = b'pnet'
pNode = b'pnod'
pSocket = b'psoc'
pSCSIBus = b'pscb'
pSCSILUN = b'pslu'
eDeviceType = b'edvt'
eAddressSpec = b'eads'
eConduit = b'econ'
eProtocol = b'epro'
eADB = b'eadb'
eAnalogAudio = b'epau'
eAppleTalk = b'epat'
eAudioLineIn = b'ecai'
eAudioLineOut = b'ecal'
eAudioOut = b'ecao'
eBus = b'ebus'
eCDROM = b'ecd '
eCommSlot = b'eccm'
eDigitalAudio = b'epda'
eDisplay = b'edds'
eDVD = b'edvd'
eEthernet = b'ecen'
eFireWire = b'ecfw'
eFloppy = b'efd '
eHD = b'ehd '
eInfrared = b'ecir'
eIP = b'epip'
eIrDA = b'epir'
eIRTalk = b'epit'
eKeyboard = b'ekbd'
eLCD = b'edlc'
eLocalTalk = b'eclt'
eMacIP = b'epmi'
eMacVideo = b'epmv'
eMicrophone = b'ecmi'
eModemPort = b'ecmp'
eModemPrinterPort = b'empp'
eModem = b'edmm'
eMonitorOut = b'ecmn'
eMouse = b'emou'
eNuBusCard = b'ednb'
eNuBus = b'enub'
ePCcard = b'ecpc'
ePCIbus = b'ecpi'
ePCIcard = b'edpi'
ePDSslot = b'ecpd'
ePDScard = b'epds'
ePointingDevice = b'edpd'
ePostScript = b'epps'
ePPP = b'eppp'
ePrinterPort = b'ecpp'
ePrinter = b'edpr'
eSvideo = b'epsv'
eSCSI = b'ecsc'
eSerial = b'epsr'
eSpeakers = b'edsp'
eStorageDevice = b'edst'
eSVGA = b'epsg'
eTokenRing = b'etok'
eTrackball = b'etrk'
eTrackpad = b'edtp'
eUSB = b'ecus'
eVideoIn = b'ecvi'
eVideoMonitor = b'edvm'
eVideoOut = b'ecvo'
cKeystroke = b'kprs'
pKeystrokeKey = b'kMsg'
pModifiers = b'kMod'
pKeyKind = b'kknd'
eModifiers = b'eMds'
eOptionDown = b'Kopt'
eCommandDown = b'Kcmd'
eControlDown = b'Kctl'
eShiftDown = b'Ksft'
eCapsLockDown = b'Kclk'
eKeyKind = b'ekst'
eEscapeKey = b'ks5\x00'
eDeleteKey = b'ks3\x00'
eTabKey = b'ks0\x00'
eReturnKey = b'ks\x24\x00'
eClearKey = b'ksG\x00'
eEnterKey = b'ksL\x00'
eUpArrowKey = b'ks\x7E\x00'
eDownArrowKey = b'ks\x7D\x00'
eLeftArrowKey = b'ks\x7B\x00'
eRightArrowKey = b'ks\x7C\x00'
eHelpKey = b'ksr\x00'
eHomeKey = b'kss\x00'
ePageUpKey = b'kst\x00'
ePageDownKey = b'ksy\x00'
eForwardDelKey = b'ksu\x00'
eEndKey = b'ksw\x00'
eF1Key = b'ksz\x00'
eF2Key = b'ksx\x00'
eF3Key = b'ksc\x00'
eF4Key = b'ksv\x00'
eF5Key = b'ks\x60\x00'
eF6Key = b'ksa\x00'
eF7Key = b'ksb\x00'
eF8Key = b'ksd\x00'
eF9Key = b'kse\x00'
eF10Key = b'ksm\x00'
eF11Key = b'ksg\x00'
eF12Key = b'kso\x00'
eF13Key = b'ksi\x00'
eF14Key = b'ksk\x00'
eF15Key = b'ksq\x00'
keyAELaunchedAsLogInItem = b'lgit'
keyAELaunchedAsServiceItem = b'svit'
# AEUserTermTypes.h
kAEUserTerminology = b'aeut'
kAETerminologyExtension = b'aete'
kAEScriptingSizeResource = b'scsz'
kAEOSAXSizeResource = b'osiz'
kAEUTHasReturningParam = 31
kAEUTOptional = 15
kAEUTlistOfItems = 14
kAEUTEnumerated = 13
kAEUTReadWrite = 12
kAEUTChangesState = 12
kAEUTTightBindingFunction = 12
kAEUTEnumsAreTypes = 11
kAEUTEnumListIsExclusive = 10
kAEUTReplyIsReference = 9
kAEUTDirectParamIsReference = 9
kAEUTParamIsReference = 9
kAEUTPropertyIsReference = 9
kAEUTNotDirectParamIsTarget = 8
kAEUTParamIsTarget = 8
kAEUTApostrophe = 3
kAEUTFeminine = 2
kAEUTMasculine = 1
kAEUTPlural = 0
kLaunchToGetTerminology = (1 << 15)
kDontFindAppBySignature = (1 << 14)
kAlwaysSendSubject = (1 << 13)
kReadExtensionTermsMask = (1 << 15)
kOSIZDontOpenResourceFile = 15
kOSIZdontAcceptRemoteEvents = 14
kOSIZOpenWithReadPermission = 13
kOSIZCodeInSharedLibraries = 11
# AppleEvents.h
keyDirectObject = b'----'
keyErrorNumber = b'errn'
keyErrorString = b'errs'
keyProcessSerialNumber = b'psn '
keyPreDispatch = b'phac'
keySelectProc = b'selh'
keyAERecorderCount = b'recr'
keyAEVersion = b'vers'
kCoreEventClass = b'aevt'
kAEOpenApplication = b'oapp'
kAEOpenDocuments = b'odoc'
kAEPrintDocuments = b'pdoc'
kAEOpenContents = b'ocon'
kAEQuitApplication = b'quit'
kAEAnswer = b'ansr'
kAEApplicationDied = b'obit'
kAEShowPreferences = b'pref'
kAEStartRecording = b'reca'
kAEStopRecording = b'recc'
kAENotifyStartRecording = b'rec1'
kAENotifyStopRecording = b'rec0'
kAENotifyRecording = b'recr'
kAEUnknownSource = 0
kAEDirectCall = 1
kAESameProcess = 2
kAELocalProcess = 3
kAERemoteProcess = 4
# AEInteraction.h
kAEInteractWithSelf = 0
kAEInteractWithLocal = 1
kAEInteractWithAll = 2
kAEDoNotIgnoreHandler = 0x00000000
kAEIgnoreAppPhacHandler = 0x00000001
kAEIgnoreAppEventHandler = 0x00000002
kAEIgnoreSysPhacHandler = 0x00000004
kAEIgnoreSysEventHandler = 0x00000008
kAEIngoreBuiltInEventHandler = 0x00000010
kAEDontDisposeOnResume = 0x80000000
kAENoDispatch = 0
kAEUseStandardDispatch = 0xFFFFFFFF
# AppleScript.h
typeAppleScript = b'ascr'
kAppleScriptSubtype = typeAppleScript
typeASStorage = typeAppleScript
kASSelectInit = 0x1001
kASSelectSetSourceStyles = 0x1002
kASSelectGetSourceStyles = 0x1003
kASSelectGetSourceStyleNames = 0x1004
kASSelectCopySourceAttributes = 0x1005
kASSelectSetSourceAttributes = 0x1006
kASHasOpenHandler = b'hsod'
kASDefaultMinStackSize = 4
kASDefaultPreferredStackSize = 16
kASDefaultMaxStackSize = 16
kASDefaultMinHeapSize = 4
kASDefaultPreferredHeapSize = 16
kASDefaultMaxHeapSize = 32
kASSourceStyleUncompiledText = 0
kASSourceStyleNormalText = 1
kASSourceStyleLanguageKeyword = 2
kASSourceStyleApplicationKeyword = 3
kASSourceStyleComment = 4
kASSourceStyleLiteral = 5
kASSourceStyleUserSymbol = 6
kASSourceStyleObjectSpecifier = 7
kASNumberOfSourceStyles = 8
# ASDebugging.h
kOSAModeDontDefine = 0x0001
kASSelectSetPropertyObsolete = 0x1101
kASSelectGetPropertyObsolete = 0x1102
kASSelectSetHandlerObsolete = 0x1103
kASSelectGetHandlerObsolete = 0x1104
kASSelectGetAppTerminologyObsolete = 0x1105
kASSelectSetProperty = 0x1106
kASSelectGetProperty = 0x1107
kASSelectSetHandler = 0x1108
kASSelectGetHandler = 0x1109
kASSelectGetAppTerminology = 0x110A
kASSelectGetSysTerminology = 0x110B
kASSelectGetPropertyNames = 0x110C
kASSelectGetHandlerNames = 0x110D
# ASRegistry.h
keyAETarget = b'targ'
keySubjectAttr = b'subj'
keyASReturning = b'Krtn'
kASAppleScriptSuite = b'ascr'
kASScriptEditorSuite = b'ToyS'
kASTypeNamesSuite = b'tpnm'
typeAETE = b'aete'
typeAEUT = b'aeut'
kGetAETE = b'gdte'
kGetAEUT = b'gdut'
kUpdateAEUT = b'udut'
kUpdateAETE = b'udte'
kCleanUpAEUT = b'cdut'
kASComment = b'cmnt'
kASLaunchEvent = b'noop'
keyScszResource = b'scsz'
typeScszResource = b'scsz'
kASSubroutineEvent = b'psbr'
keyASSubroutineName = b'snam'
kASPrepositionalSubroutine = b'psbr'
keyASPositionalArgs = b'parg'
keyAppHandledCoercion = b'idas'
kASStartLogEvent = b'log1'
kASStopLogEvent = b'log0'
kASCommentEvent = b'cmnt'
kASAdd = b'+ '
kASSubtract = b'- '
kASMultiply = b'* '
kASDivide = b'/ '
kASQuotient = b'div '
kASRemainder = b'mod '
kASPower = b'^ '
kASEqual = kAEEquals
kASNotEqual = 0xAD202020
kASGreaterThan = kAEGreaterThan
kASGreaterThanOrEqual = kAEGreaterThanEquals
kASLessThan = kAELessThan
kASLessThanOrEqual = kAELessThanEquals
kASComesBefore = b'cbfr'
kASComesAfter = b'cafr'
kASConcatenate = b'ccat'
kASStartsWith = kAEBeginsWith
kASEndsWith = kAEEndsWith
kASContains = kAEContains
kASAnd = kAEAND
kASOr = kAEOR
kASNot = kAENOT
kASNegate = b'neg '
keyASArg = b'arg '
kASErrorEventCode = b'err '
kOSAErrorArgs = b'erra'
keyAEErrorObject = b'erob'
pLength = b'leng'
pReverse = b'rvse'
pRest = b'rest'
pInherits = b'c@#^'
pProperties = b'pALL'
keyASUserRecordFields = b'usrf'
typeUserRecordFields = typeAEList
keyASPrepositionAt = b'at '
keyASPrepositionIn = b'in '
keyASPrepositionFrom = b'from'
keyASPrepositionFor = b'for '
keyASPrepositionTo = b'to '
keyASPrepositionThru = b'thru'
keyASPrepositionThrough = b'thgh'
keyASPrepositionBy = b'by '
keyASPrepositionOn = b'on '
keyASPrepositionInto = b'into'
keyASPrepositionOnto = b'onto'
keyASPrepositionBetween = b'btwn'
keyASPrepositionAgainst = b'agst'
keyASPrepositionOutOf = b'outo'
keyASPrepositionInsteadOf = b'isto'
keyASPrepositionAsideFrom = b'asdf'
keyASPrepositionAround = b'arnd'
keyASPrepositionBeside = b'bsid'
keyASPrepositionBeneath = b'bnth'
keyASPrepositionUnder = b'undr'
keyASPrepositionOver = b'over'
keyASPrepositionAbove = b'abve'
keyASPrepositionBelow = b'belw'
keyASPrepositionApartFrom = b'aprt'
keyASPrepositionGiven = b'givn'
keyASPrepositionWith = b'with'
keyASPrepositionWithout = b'wout'
keyASPrepositionAbout = b'abou'
keyASPrepositionSince = b'snce'
keyASPrepositionUntil = b'till'
kDialectBundleResType = b'Dbdl'
cConstant = typeEnumerated
cClassIdentifier = pClass
cObjectBeingExamined = typeObjectBeingExamined
cList = typeAEList
cSmallReal = typeIEEE32BitFloatingPoint
cReal = typeIEEE64BitFloatingPoint
cRecord = typeAERecord
cReference = cObjectSpecifier
cUndefined = b'undf'
cMissingValue = b'msng'
cSymbol = b'symb'
cLinkedList = b'llst'
cVector = b'vect'
cEventIdentifier = b'evnt'
cKeyIdentifier = b'kyid'
cUserIdentifier = b'uid '
cPreposition = b'prep'
cKeyForm = enumKeyForm
cScript = b'scpt'
cHandler = b'hand'
cProcedure = b'proc'
cHandleBreakpoint = b'brak'
cClosure = b'clsr'
cRawData = b'rdat'
cStringClass = typeChar
cNumber = b'nmbr'
cListElement = b'celm'
cListOrRecord = b'lr '
cListOrString = b'ls '
cListRecordOrString = b'lrs '
cNumberOrString = b'ns '
cNumberOrDateTime = b'nd '
cNumberDateTimeOrString = b'nds '
cAliasOrString = b'sf '
cSeconds = b'scnd'
typeSound = b'snd '
enumBooleanValues = b'boov'
kAETrue = typeTrue
kAEFalse = typeFalse
enumMiscValues = b'misc'
kASCurrentApplication = b'cura'
formUserPropertyID = b'usrp'
cString = cStringClass
pASIt = b'it '
pASMe = b'me '
pASResult = b'rslt'
pASSpace = b'spac'
pASReturn = b'ret '
pASTab = b'tab '
pASPi = b'pi '
pASParent = b'pare'
kASInitializeEventCode = b'init'
pASPrintLength = b'prln'
pASPrintDepth = b'prdp'
pASTopLevelScript = b'ascr'
kAECase = b'case'
kAEDiacritic = b'diac'
kAEWhiteSpace = b'whit'
kAEHyphens = b'hyph'
kAEExpansion = b'expa'
kAEPunctuation = b'punc'
kAEZenkakuHankaku = b'zkhk'
kAESmallKana = b'skna'
kAEKataHiragana = b'hika'
kASConsiderReplies = b'rmte'
kASNumericStrings = b'nume'
enumConsiderations = b'cons'
kAECaseConsiderMask = 0x00000001
kAEDiacriticConsiderMask = 0x00000002
kAEWhiteSpaceConsiderMask = 0x00000004
kAEHyphensConsiderMask = 0x00000008
kAEExpansionConsiderMask = 0x00000010
kAEPunctuationConsiderMask = 0x00000020
kASConsiderRepliesConsiderMask = 0x00000040
kASNumericStringsConsiderMask = 0x00000080
kAECaseIgnoreMask = 0x00010000
kAEDiacriticIgnoreMask = 0x00020000
kAEWhiteSpaceIgnoreMask = 0x00040000
kAEHyphensIgnoreMask = 0x00080000
kAEExpansionIgnoreMask = 0x00100000
kAEPunctuationIgnoreMask = 0x00200000
kASConsiderRepliesIgnoreMask = 0x00400000
kASNumericStringsIgnoreMask = 0x00800000
enumConsidsAndIgnores = b'csig'
cCoercion = b'coec'
cCoerceUpperCase = b'txup'
cCoerceLowerCase = b'txlo'
cCoerceRemoveDiacriticals = b'txdc'
cCoerceRemovePunctuation = b'txpc'
cCoerceRemoveHyphens = b'txhy'
cCoerceOneByteToTwoByte = b'txex'
cCoerceRemoveWhiteSpace = b'txws'
cCoerceSmallKana = b'txsk'
cCoerceZenkakuhankaku = b'txze'
cCoerceKataHiragana = b'txkh'
cZone = b'zone'
cMachine = b'mach'
cAddress = b'addr'
cRunningAddress = b'radd'
cStorage = b'stor'
pASWeekday = b'wkdy'
pASMonth = b'mnth'
pASDay = b'day '
pASYear = b'year'
pASTime = b'time'
pASDateString = b'dstr'
pASTimeString = b'tstr'
cMonth = pASMonth
cJanuary = b'jan '
cFebruary = b'feb '
cMarch = b'mar '
cApril = b'apr '
cMay = b'may '
cJune = b'jun '
cJuly = b'jul '
cAugust = b'aug '
cSeptember = b'sep '
cOctober = b'oct '
cNovember = b'nov '
cDecember = b'dec '
cWeekday = pASWeekday
cSunday = b'sun '
cMonday = b'mon '
cTuesday = b'tue '
cWednesday = b'wed '
cThursday = b'thu '
cFriday = b'fri '
cSaturday = b'sat '
pASQuote = b'quot'
pASSeconds = b'secs'
pASMinutes = b'min '
pASHours = b'hour'
pASDays = b'days'
pASWeeks = b'week'
cWritingCodeInfo = b'citl'
pScriptCode = b'pscd'
pLangCode = b'plcd'
kASMagicTellEvent = b'tell'
kASMagicEndTellEvent = b'tend'
# DigitalHubRegistry.h
kDigiHubEventClass = b'dhub'
kDigiHubMusicCD = b'aucd'
kDigiHubPictureCD = b'picd'
kDigiHubVideoDVD = b'vdvd'
kDigiHubBlankCD = b'bcd '
kDigiHubBlankDVD = b'bdvd'
# OSA.h
kOSAComponentType = b'osa '
kOSAGenericScriptingComponentSubtype = b'scpt'
kOSAFileType = b'osas'
kOSASuite = b'ascr'
kOSARecordedText = b'recd'
kOSAScriptIsModified = b'modi'
kOSAScriptIsTypeCompiledScript = b'cscr'
kOSAScriptIsTypeScriptValue = b'valu'
kOSAScriptIsTypeScriptContext = b'cntx'
kOSAScriptBestType = b'best'
kOSACanGetSource = b'gsrc'
typeOSADialectInfo = b'difo'
keyOSADialectName = b'dnam'
keyOSADialectCode = b'dcod'
keyOSADialectLangCode = b'dlcd'
keyOSADialectScriptCode = b'dscd'
kOSANullScript = 0
kOSANullMode = 0
kOSAModeNull = 0
kOSASupportsCompiling = 0x0002
kOSASupportsGetSource = 0x0004
kOSASupportsAECoercion = 0x0008
kOSASupportsAESending = 0x0010
kOSASupportsRecording = 0x0020
kOSASupportsConvenience = 0x0040
kOSASupportsDialects = 0x0080
kOSASupportsEventHandling = 0x0100
kOSASelectLoad = 0x0001
kOSASelectStore = 0x0002
kOSASelectExecute = 0x0003
kOSASelectDisplay = 0x0004
kOSASelectScriptError = 0x0005
kOSASelectDispose = 0x0006
kOSASelectSetScriptInfo = 0x0007
kOSASelectGetScriptInfo = 0x0008
kOSASelectSetActiveProc = 0x0009
kOSASelectGetActiveProc = 0x000A
kOSASelectCopyDisplayString = 0x000B
kOSASelectScriptingComponentName = 0x0102
kOSASelectCompile = 0x0103
kOSASelectCopyID = 0x0104
kOSASelectCopyScript = 0x0105
kOSASelectGetSource = 0x0201
kOSASelectCopySourceString = 0x0202
kOSASelectCoerceFromDesc = 0x0301
kOSASelectCoerceToDesc = 0x0302
kOSASelectSetSendProc = 0x0401
kOSASelectGetSendProc = 0x0402
kOSASelectSetCreateProc = 0x0403
kOSASelectGetCreateProc = 0x0404
kOSASelectSetDefaultTarget = 0x0405
kOSASelectStartRecording = 0x0501
kOSASelectStopRecording = 0x0502
kOSASelectLoadExecute = 0x0601
kOSASelectCompileExecute = 0x0602
kOSASelectDoScript = 0x0603
kOSASelectSetCurrentDialect = 0x0701
kOSASelectGetCurrentDialect = 0x0702
kOSASelectAvailableDialects = 0x0703
kOSASelectGetDialectInfo = 0x0704
kOSASelectAvailableDialectCodeList = 0x0705
kOSASelectSetResumeDispatchProc = 0x0801
kOSASelectGetResumeDispatchProc = 0x0802
kOSASelectExecuteEvent = 0x0803
kOSASelectDoEvent = 0x0804
kOSASelectMakeContext = 0x0805
kOSASelectComponentSpecificStart = 0x1001
kOSAModePreventGetSource = 0x00000001
kOSAModeNeverInteract = kAENeverInteract
kOSAModeCanInteract = kAECanInteract
kOSAModeAlwaysInteract = kAEAlwaysInteract
kOSAModeDontReconnect = kAEDontReconnect
kOSAModeCantSwitchLayer = 0x00000040
kOSAModeDoRecord = 0x00001000
kOSAModeCompileIntoContext = 0x00000002
kOSAModeAugmentContext = 0x00000004
kOSAModeDisplayForHumans = 0x00000008
kOSAModeDontStoreParent = 0x00010000
kOSAModeDispatchToDirectObject = 0x00020000
kOSAModeDontGetDataForArguments = 0x00040000
kOSAModeFullyQualifyDescriptors = 0x00080000
kOSAScriptResourceType = kOSAGenericScriptingComponentSubtype
typeOSAGenericStorage = kOSAScriptResourceType
kOSAErrorNumber = keyErrorNumber
kOSAErrorMessage = keyErrorString
kOSAErrorBriefMessage = b'errb'
kOSAErrorApp = b'erap'
kOSAErrorPartialResult = b'ptlr'
kOSAErrorOffendingObject = b'erob'
kOSAErrorExpectedType = b'errt'
kOSAErrorRange = b'erng'
typeOSAErrorRange = b'erng'
keyOSASourceStart = b'srcs'
keyOSASourceEnd = b'srce'
kOSAUseStandardDispatch = kAEUseStandardDispatch
kOSANoDispatch = kAENoDispatch
kOSADontUsePhac = 0x0001
# OSAComp.h
# OSAGeneric.h
kGenericComponentVersion = 0x0100
kGSSSelectGetDefaultScriptingComponent = 0x1001
kGSSSelectSetDefaultScriptingComponent = 0x1002
kGSSSelectGetScriptingComponent = 0x1003
kGSSSelectGetScriptingComponentFromStored = 0x1004
kGSSSelectGenericToRealID = 0x1005
kGSSSelectRealToGenericID = 0x1006
kGSSSelectOutOfRange = 0x1007
# Miscellaneous
|
|
"""
Event parser and human readable log generator.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logbook/
"""
import asyncio
import logging
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import sun
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
STATE_NOT_HOME, STATE_OFF, STATE_ON, ATTR_HIDDEN, HTTP_BAD_REQUEST,
EVENT_LOGBOOK_ENTRY)
from homeassistant.core import State, split_entity_id, DOMAIN as HA_DOMAIN
DOMAIN = 'logbook'
DEPENDENCIES = ['recorder', 'frontend']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
[cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
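# Example configuration.yaml entry (illustrative; the entity and domain names are placeholders):
# logbook:
#   exclude:
#     domains:
#       - automation
#     entities:
#       - sensor.last_boot
#   include:
#     domains:
#       - light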
GROUP_BY_MINUTES = 15
CONTINUOUS_DOMAINS = ['proximity', 'sensor']
ATTR_NAME = 'name'
ATTR_MESSAGE = 'message'
ATTR_DOMAIN = 'domain'
ATTR_ENTITY_ID = 'entity_id'
LOG_MESSAGE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_DOMAIN): cv.slug,
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
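# log_entry is safe to call from any thread (it schedules the work via hass.add_job);
# async_log_entry must be called from within the event loop.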
def log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
hass.add_job(async_log_entry, hass, name, message, domain, entity_id)
def async_log_entry(hass, name, message, domain=None, entity_id=None):
"""Add an entry to the logbook."""
data = {
ATTR_NAME: name,
ATTR_MESSAGE: message
}
if domain is not None:
data[ATTR_DOMAIN] = domain
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, data)
@asyncio.coroutine
def setup(hass, config):
"""Listen for download events to download files."""
@callback
def log_message(service):
"""Handle sending notification message service calls."""
message = service.data[ATTR_MESSAGE]
name = service.data[ATTR_NAME]
domain = service.data.get(ATTR_DOMAIN)
entity_id = service.data.get(ATTR_ENTITY_ID)
message.hass = hass
message = message.async_render()
async_log_entry(hass, name, message, domain, entity_id)
hass.http.register_view(LogbookView(config.get(DOMAIN, {})))
yield from hass.components.frontend.async_register_built_in_panel(
'logbook', 'logbook', 'mdi:format-list-bulleted-type')
hass.services.async_register(
DOMAIN, 'log', log_message, schema=LOG_MESSAGE_SCHEMA)
return True
class LogbookView(HomeAssistantView):
"""Handle logbook view requests."""
url = '/api/logbook'
name = 'api:logbook'
extra_urls = ['/api/logbook/{datetime}']
def __init__(self, config):
"""Initilalize the logbook view."""
self.config = config
@asyncio.coroutine
def get(self, request, datetime=None):
"""Retrieve logbook entries."""
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
else:
datetime = dt_util.start_of_local_day()
start_day = dt_util.as_utc(datetime)
end_day = start_day + timedelta(days=1)
hass = request.app['hass']
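# Run the blocking database query in the executor so the event loop is not blocked.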
events = yield from hass.async_add_job(
_get_events, hass, self.config, start_day, end_day)
return self.json(events)
class Entry(object):
"""A human readable version of the log."""
def __init__(self, when=None, name=None, message=None, domain=None,
entity_id=None):
"""Initialize the entry."""
self.when = when
self.name = name
self.message = message
self.domain = domain
self.entity_id = entity_id
def as_dict(self):
"""Convert entry to a dict to be used within JSON."""
return {
'when': self.when,
'name': self.name,
'message': self.message,
'domain': self.domain,
'entity_id': self.entity_id,
}
def humanify(events):
"""Generate a converted list of events into Entry objects.
Will try to group events if possible:
- if 2+ sensor updates in GROUP_BY_MINUTES, show last
- if home assistant stop and start happen in same minute call it restarted
"""
# Group events in batches of GROUP_BY_MINUTES
for _, g_events in groupby(
events,
lambda event: event.time_fired.minute // GROUP_BY_MINUTES):
events_batch = list(g_events)
# Keep track of last sensor states
last_sensor_event = {}
# Group HA start/stop events
# Maps minute of event to 1: stop, 2: stop + start
start_stop_events = {}
# Process events
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
entity_id = event.data.get('entity_id')
if entity_id is None:
continue
if entity_id.startswith(tuple('{}.'.format(
domain) for domain in CONTINUOUS_DOMAINS)):
last_sensor_event[entity_id] = event
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if event.time_fired.minute in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 1
elif event.event_type == EVENT_HOMEASSISTANT_START:
if event.time_fired.minute not in start_stop_events:
continue
start_stop_events[event.time_fired.minute] = 2
# Yield entries
for event in events_batch:
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
# If last_changed != last_updated only attributes have changed
# we do not report on that yet. Also filter auto groups.
if not to_state or \
to_state.last_changed != to_state.last_updated or \
to_state.domain == 'group' and \
to_state.attributes.get('auto', False):
continue
domain = to_state.domain
# Skip all but the last sensor state
if domain in CONTINUOUS_DOMAINS and \
event != last_sensor_event[to_state.entity_id]:
continue
# Don't show continuous sensor value changes in the logbook
if domain in CONTINUOUS_DOMAINS and \
to_state.attributes.get('unit_of_measurement'):
continue
yield Entry(
event.time_fired,
name=to_state.name,
message=_entry_message_from_state(domain, to_state),
domain=domain,
entity_id=to_state.entity_id)
elif event.event_type == EVENT_HOMEASSISTANT_START:
if start_stop_events.get(event.time_fired.minute) == 2:
continue
yield Entry(
event.time_fired, "Home Assistant", "started",
domain=HA_DOMAIN)
elif event.event_type == EVENT_HOMEASSISTANT_STOP:
if start_stop_events.get(event.time_fired.minute) == 2:
action = "restarted"
else:
action = "stopped"
yield Entry(
event.time_fired, "Home Assistant", action,
domain=HA_DOMAIN)
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain is None and entity_id is not None:
try:
domain = split_entity_id(str(entity_id))[0]
except IndexError:
pass
yield Entry(
event.time_fired, event.data.get(ATTR_NAME),
event.data.get(ATTR_MESSAGE), domain,
entity_id)
def _get_events(hass, config, start_day, end_day):
"""Get events for a period of time."""
from homeassistant.components.recorder.models import Events
from homeassistant.components.recorder.util import (
execute, session_scope)
with session_scope(hass=hass) as session:
query = session.query(Events).order_by(
Events.time_fired).filter(
(Events.time_fired > start_day) &
(Events.time_fired < end_day))
events = execute(query)
return humanify(_exclude_events(events, config))
def _exclude_events(events, config):
"""Get lists of excluded entities and platforms."""
excluded_entities = []
excluded_domains = []
included_entities = []
included_domains = []
exclude = config.get(CONF_EXCLUDE)
if exclude:
excluded_entities = exclude[CONF_ENTITIES]
excluded_domains = exclude[CONF_DOMAINS]
include = config.get(CONF_INCLUDE)
if include:
included_entities = include[CONF_ENTITIES]
included_domains = include[CONF_DOMAINS]
filtered_events = []
for event in events:
domain, entity_id = None, None
if event.event_type == EVENT_STATE_CHANGED:
to_state = State.from_dict(event.data.get('new_state'))
# Do not report on new entities
if event.data.get('old_state') is None:
continue
# Do not report on entity removal
if not to_state:
continue
# exclude entities which are customized hidden
hidden = to_state.attributes.get(ATTR_HIDDEN, False)
if hidden:
continue
domain = to_state.domain
entity_id = to_state.entity_id
elif event.event_type == EVENT_LOGBOOK_ENTRY:
domain = event.data.get(ATTR_DOMAIN)
entity_id = event.data.get(ATTR_ENTITY_ID)
if domain or entity_id:
# filter if only excluded is configured for this domain
if excluded_domains and domain in excluded_domains and \
not included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if only included is configured for this domain
elif not excluded_domains and included_domains and \
domain not in included_domains:
if (included_entities and entity_id not in included_entities) \
or not included_entities:
continue
# filter if included and excluded is configured for this domain
elif excluded_domains and included_domains and \
(domain not in included_domains or
domain in excluded_domains):
if (included_entities and entity_id not in included_entities) \
or not included_entities or domain in excluded_domains:
continue
# filter if only included is configured for this entity
elif not excluded_domains and not included_domains and \
included_entities and entity_id not in included_entities:
continue
# check if logbook entry is excluded for this entity
if entity_id in excluded_entities:
continue
filtered_events.append(event)
return filtered_events
# pylint: disable=too-many-return-statements
def _entry_message_from_state(domain, state):
"""Convert a state to a message for the logbook."""
# We pass domain in so we don't have to split entity_id again
if domain == 'device_tracker':
if state.state == STATE_NOT_HOME:
return 'is away'
return 'is at {}'.format(state.state)
elif domain == 'sun':
if state.state == sun.STATE_ABOVE_HORIZON:
return 'has risen'
return 'has set'
elif state.state == STATE_ON:
# Future: combine groups and its entity entries ?
return "turned on"
elif state.state == STATE_OFF:
return "turned off"
return "changed to {}".format(state.state)
|
|
#!/usr/bin/env python
"""
Module for simulation.
"""
import copy
import os
import numpy as np
import scipy.interpolate
import astropy.io.fits as pyfits
import healpy as hp
import numpy.lib.recfunctions as recfuncs
import fitsio
import ugali.observation.catalog
import ugali.observation.mask
import ugali.observation.roi
import ugali.utils.projector
import ugali.utils.skymap
import ugali.utils.stats
import ugali.utils.healpix
import ugali.utils.config
import ugali.analysis.scan
import ugali.analysis.loglike
from ugali.utils.projector import gal2cel, cel2gal, sr2deg, mod2dist
from ugali.utils.healpix import ang2pix, pix2ang
from ugali.utils.logger import logger
from ugali.utils.config import Config
class Generator:
"""
Class for generating the parameters of the simulation.
"""
def __init__(self,config, seed=None):
self.config = Config(config)
self.seed = seed
if self.seed is not None: np.random.seed(self.seed)
def generate(self, size=1):
params = dict(self.config['simulate']['params'])
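# Each entry of config['simulate']['params'] is expected to be a (low, high, sampling)
# tuple, where sampling is 'linear' or 'log'; 'lon' and 'lat' are handled separately below.
# Illustrative example: distance_modulus: [16., 22., 'linear']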
dtype = [(n,'>f4') for n in params.keys()]
data = np.zeros(size,dtype=dtype)
lon,lat = params.pop('lon'),params.pop('lat')
data['lon'],data['lat'] = self.sky(lon,lat,size)
for key,value in params.items():
if value[-1] == 'linear':
data[key] = self.linear(value[0],value[1],size)
elif value[-1] == 'log':
data[key] = self.logarithmic(value[0],value[1],size)
else:
raise Exception('Unrecognized sampling type: %s'%value[-1])
return data
def sky(self,lon=None,lat=None,size=1):
logger.info("Generating %i random points..."%size)
# Random longitude and latitude
lon,lat = ugali.utils.stats.sky(lon,lat,size=10*size)
# Random healpix coordinates inside footprint
nside_pixel = self.config['coords']['nside_pixel']
pixels = ang2pix(nside_pixel,lon,lat)
if np.unique(pixels).size > 1:
inside = ugali.utils.skymap.inFootprint(self.config,pixels,nside=nside_pixel)
else:
inside = np.ones(len(pixels),dtype=bool)
return lon[inside][:size],lat[inside][:size]
def linear(self,low,high,size):
return np.random.uniform(low,high,size)
def logarithmic(self,low,high,size):
if low==0 and high==0:
logger.warning("Can't sample logarithmically with boundary of zero.")
return np.zeros(size)
return 10**np.random.uniform(np.log10(low),np.log10(high),size)
def detectability(self,**kwargs):
"""
An a priori detectability proxy.
"""
distance_modulus = kwargs.get('distance_modulus')
distance = mod2dist(distance_modulus)
stellar_mass = kwargs.get('stellar_mass')
extension = kwargs.get('extension')
# Normalized to 10^3 Msolar at mod=18
norm = 10**3/mod2dist(18)**2
detect = stellar_mass / distance**2
detect /= norm
return detect
def write(self, filename, data=None):
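"""Write the results array to a file; the format is chosen from the file extension."""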
if data is None: data = self.results
logger.info("Writing %s..."%filename)
if filename.endswith('.npy'):
np.save(filename,data)
elif filename.endswith('.fits'):
# Copies data, so be careful..
out = np.rec.array(data)
out.dtype.names = np.char.upper(out.dtype.names)
hdu = pyfits.new_table(out)
hdu.writeto(filename,clobber=True)
elif filename.endswith('.txt') or filename.endswith('.dat'):
np.savetxt(filename,data)
elif filename.endswith('.csv'):
np.savetxt(filename,data,delimiter=',')
else:
raise Exception('Unrecognized file extension: %s'%filename)
def run(self, outfile=None, size=None):
if size is None: size = self.config['simulate']['size']
data = self.generate(size)
dtype=[('kernel','S18'),('ts','>f4'),('fit_kernel','S18'),('fit_ts','>f4'),
('fit_mass','>f4'),('fit_mass_err','>f4'),
('fit_distance','>f4'),('fit_distance_err','>f4')]
results = np.array(np.nan*np.ones(size),dtype=dtype)
results = recfuncs.merge_arrays([data,results],flatten=True,asrecarray=False,usemask=False)
self.results = results
if outfile: self.write(outfile,results)
for i,d in enumerate(data):
params = dict(list(zip(data.dtype.names,d)))
lon,lat = params['lon'],params['lat']
distance_modulus = params['distance_modulus']
logger.info('\n(%i/%i); (lon, lat) = (%.2f, %.2f)'%(i+1,len(data),lon,lat))
roi = ugali.analysis.loglike.createROI(self.config,lon,lat)
mask = ugali.analysis.loglike.createMask(self.config,roi)
isochrone = ugali.analysis.loglike.createIsochrone(self.config)
kernel = ugali.analysis.loglike.createKernel(self.config,lon,lat)
pix = roi.indexTarget(lon,lat)
simulator = Simulator(self.config,roi)
#catalog = simulator.simulate(seed=self.seed, **params)
catalog = simulator.simulate(**params)
#print "Catalog annulus contains:",roi.inAnnulus(simulator.catalog.lon,simulator.catalog.lat).sum()
logger.info("Simulated catalog annulus contains %i stars"%roi.inAnnulus(catalog.lon,catalog.lat).sum())
if len(catalog.lon) < 1000:
logger.error("Simulation contains too few objects; skipping...")
continue
"""
like = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, catalog, isochrone, kernel)
like.set_params(distance_modulus=params['distance_modulus'])
like.sync_params()
results[i]['ts'] = 2*like.fit_richness()[0]
print 'TS=',results[i]['ts']
like2 = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, simulator.catalog, isochrone, kernel)
like2.set_params(distance_modulus=params['distance_modulus'])
like2.sync_params()
print 'TS=',2*like2.fit_richness()[0]
"""
#return simulator,like,like2
# Index of closest distance modulus
grid = ugali.analysis.scan.GridSearch(self.config,roi,mask,catalog,isochrone,kernel)
self.catalog = catalog
self.simulator = simulator
self.grid = grid
self.loglike = self.grid.loglike
# ADW: Should allow fit_distance to float in order to model search procedure
#fit_distance = float(distance_modulus)
distance_idx = np.fabs(grid.distance_modulus_array-params['distance_modulus']).argmin()
fit_distance = grid.distance_modulus_array[distance_idx]
grid.search(coords=(lon,lat),distance_modulus=fit_distance)
logger.info(str(self.loglike))
mle = grid.mle()
results[i]['kernel'] = simulator.kernel.name
results[i]['fit_kernel'] = grid.loglike.kernel.name
results[i]['ts'] = 2*grid.log_likelihood_sparse_array[distance_idx][pix]
results[i]['fit_ts'] = 2*np.max(grid.log_likelihood_sparse_array[:,pix])
results[i]['fit_mass'] = grid.stellar_mass_conversion*mle['richness']
results[i]['fit_distance'] = fit_distance #mle['distance_modulus']
err = grid.err()
richness_err = (err['richness'][1]-err['richness'][0])/2.
results[i]['fit_mass_err'] = grid.stellar_mass_conversion*richness_err
distance_modulus_err = (err['distance_modulus'][1]-err['distance_modulus'][0])/2.
results[i]['fit_distance_err'] = distance_modulus_err
for d in dtype:
logger.info('\t%s: %s'%(d[0], results[i][d[0]]))
if i%self.config['simulate']['save']==0 and outfile:
self.write(outfile,results)
if outfile: self.write(outfile,results)
return results
############################################################
class Simulator(object):
"""
Class for simulating catalog data.
"""
def __init__(self, config, roi, **kwargs):
self.config = ugali.utils.config.Config(config)
self.roi = roi
#np.random.seed(0)
params = dict(self.config)
if self.config['simulate'].get('isochrone') is None:
params['simulate']['isochrone'] = params['isochrone']
if self.config['simulate'].get('kernel') is None:
params['simulate']['kernel'] = params['kernel']
self.isochrone = ugali.analysis.loglike.createIsochrone(params)
self.kernel = ugali.analysis.loglike.createKernel(params['simulate'],lon=self.roi.lon,lat=self.roi.lat)
self.mask = ugali.analysis.loglike.createMask(self.config,self.roi)
self._create_catalog(kwargs.get('catalog'))
self.photo_err_1,self.photo_err_2 = self.mask.photo_err_1,self.mask.photo_err_2
#self._photometricErrors()
self._setup_subpix()
#self._setup_cmd()
def _create_catalog(self,catalog=None):
"""
Create the observed catalog and restrict it to the observable space defined by the mask.
"""
if catalog is None:
catalog = ugali.analysis.loglike.createCatalog(self.config,self.roi)
cut = self.mask.restrictCatalogToObservableSpace(catalog)
self.catalog = catalog.applyCut(cut)
def _photometricErrors(self, n_per_bin=100, plot=False):
"""
Realistic photometric errors estimated from catalog objects and mask.
Extend below the magnitude threshold with a flat extrapolation.
"""
self.catalog.spatialBin(self.roi)
if len(self.catalog.mag_1) < n_per_bin:
logger.warning("Catalog contains fewer objects than requested to calculate errors.")
n_per_bin = int(len(self.catalog.mag_1) / 3)
# Band 1
mag_1_thresh = self.mask.mask_1.mask_roi_sparse[self.catalog.pixel_roi_index] - self.catalog.mag_1
sorting_indices = np.argsort(mag_1_thresh)
mag_1_thresh_sort = mag_1_thresh[sorting_indices]
mag_err_1_sort = self.catalog.mag_err_1[sorting_indices]
# ADW: Can't this be done with np.median(axis=?)
mag_1_thresh_medians = []
mag_err_1_medians = []
for i in range(0, int(len(mag_1_thresh) / float(n_per_bin))):
mag_1_thresh_medians.append(np.median(mag_1_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))
mag_err_1_medians.append(np.median(mag_err_1_sort[n_per_bin * i: n_per_bin * (i + 1)]))
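# If no catalog objects reach the magnitude limit (smallest threshold > 0), insert a
# sentinel point at thresh = -99 so the error relation extrapolates flat below the limit.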
if mag_1_thresh_medians[0] > 0.:
mag_1_thresh_medians = np.insert(mag_1_thresh_medians, 0, -99.)
mag_err_1_medians = np.insert(mag_err_1_medians, 0, mag_err_1_medians[0])
self.photo_err_1 = scipy.interpolate.interp1d(mag_1_thresh_medians, mag_err_1_medians,
bounds_error=False, fill_value=mag_err_1_medians[-1])
# Band 2
mag_2_thresh = self.mask.mask_2.mask_roi_sparse[self.catalog.pixel_roi_index] - self.catalog.mag_2
sorting_indices = np.argsort(mag_2_thresh)
mag_2_thresh_sort = mag_2_thresh[sorting_indices]
mag_err_2_sort = self.catalog.mag_err_2[sorting_indices]
mag_2_thresh_medians = []
mag_err_2_medians = []
for i in range(0, int(len(mag_2_thresh) / float(n_per_bin))):
mag_2_thresh_medians.append(np.median(mag_2_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))
mag_err_2_medians.append(np.median(mag_err_2_sort[n_per_bin * i: n_per_bin * (i + 1)]))
if mag_2_thresh_medians[0] > 0.:
mag_2_thresh_medians = np.insert(mag_2_thresh_medians, 0, -99.)
mag_err_2_medians = np.insert(mag_err_2_medians, 0, mag_err_2_medians[0])
self.photo_err_2 = scipy.interpolate.interp1d(mag_2_thresh_medians, mag_err_2_medians,
bounds_error=False, fill_value=mag_err_2_medians[-1])
def _setup_subpix(self,nside=2**16):
"""
Subpixels for random position generation.
"""
# Only setup once...
if hasattr(self,'subpix'): return
# Simulate over full ROI
self.roi_radius = self.config['coords']['roi_radius']
# Setup background spatial stuff
logger.info("Setup subpixels...")
self.nside_pixel = self.config['coords']['nside_pixel']
self.nside_subpixel = self.nside_pixel * 2**4 # Could be config parameter
epsilon = np.degrees(hp.max_pixrad(self.nside_pixel)) # Pad roi radius to cover edge healpix
subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon)
superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel)
self.subpix = subpix[np.in1d(superpix,self.roi.pixels)]
def _setup_cmd(self,mode='cloud-in-cells'):
"""
The purpose here is to create a more finely binned
background CMD to sample from.
"""
# Only setup once...
if hasattr(self,'bkg_lambda'): return
logger.info("Setup color...")
# In the limit theta->0: 2*pi*(1-cos(theta)) -> pi*theta**2
# (Remember to convert from sr to deg^2)
#solid_angle_roi = sr2deg(2*np.pi*(1-np.cos(np.radians(self.roi_radius))))
solid_angle_roi = self.roi.area_pixel*len(self.roi.pixels)
# Large CMD bins cause problems when simulating
config = Config(self.config)
config['color']['n_bins'] *= 5 #10
config['mag']['n_bins'] *= 1 #2
#config['mask']['minimum_solid_angle'] = 0
roi = ugali.analysis.loglike.createROI(config,self.roi.lon,self.roi.lat)
mask = ugali.analysis.loglike.createMask(config,roi)
self.bkg_centers_color = roi.centers_color
self.bkg_centers_mag = roi.centers_mag
# Background CMD has units: [objs / deg^2 / mag^2]
cmd_background = mask.backgroundCMD(self.catalog,mode)
self.bkg_lambda=cmd_background*solid_angle_roi*roi.delta_color*roi.delta_mag
np.sum(self.bkg_lambda)
# Clean up
del config, roi, mask
def toy_background(self,mc_source_id=2,seed=None):
"""
Quick uniform background generation.
"""
logger.info("Running toy background simulation...")
size = 20000
nstar = np.random.poisson(size)
#np.random.seed(0)
logger.info("Simulating %i background stars..."%nstar)
### # Random points from roi pixels
### idx = np.random.randint(len(self.roi.pixels)-1,size=nstar)
### pix = self.roi.pixels[idx]
# Random points drawn from subpixels
logger.info("Generating uniform positions...")
idx = np.random.randint(0,len(self.subpix)-1,size=nstar)
lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx])
pix = ang2pix(self.nside_pixel, lon, lat)
lon,lat = pix2ang(self.nside_pixel,pix)
# Single color
#mag_1 = 19.05*np.ones(len(pix))
#mag_2 = 19.10*np.ones(len(pix))
# Uniform in color
logger.info("Generating uniform CMD...")
mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar)
color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar)
mag_2 = mag_1 - color
# There is probably a better way to do this step without creating the full HEALPix map
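# Build full-sky maps of the band-1 and band-2 magnitude limits so that the limit
# at each simulated star's position can be looked up by HEALPix pixel index.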
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.nside_pixel))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
#mag_err_1 = 1.0*np.ones(len(pix))
#mag_err_2 = 1.0*np.ones(len(pix))
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
mc_source_id = mc_source_id * np.ones(len(mag_1))
select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2)
hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select],
mag_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def background(self,mc_source_id=2,seed=None):
"""
Create a simulation of the background stellar population.
Because some stars have been clipped to generate the CMD,
this function tends to slightly underestimate (~1%) the
background as compared to the true catalog.
The simulation of background object colors relies on the
data-derived CMD. As such, it is a binned random generator
and thus has some fundamental limitations.
- The expected number of counts per bin is drawn from a Poisson distribution
There are a few limitations of this procedure:
- Colors are drawn from the CMD of the background annulus
- The number of stars per CMD bin is randomized according to the CMD
- The colors/mags are then uniformly distributed within the bin
- This leads to trouble with large bins when the cloud-in-cells
algorithm is applied to the simulated data
- The positions are chosen randomly over the spherical cap of the ROI
- Objects that are outside of the observable space (fainter than the local magnitude limit) are removed
WARNING: The cloud-in-cells method of generating
the CMD leads to some difficulties since it disperses
objects from high-density zones to low density zones.
- Magnitudes are not randomized according to their errors
"""
if seed is not None: np.random.seed(seed)
self._setup_cmd()
# Randomize the number of stars per bin according to Poisson distribution
nstar_per_bin = np.random.poisson(lam=self.bkg_lambda)
nstar = nstar_per_bin.sum()
logger.info("Simulating %i background stars..."%nstar)
if not self.config['simulate'].get('uniform'):
logger.info("Generating colors from background CMD.")
# Distribute the stars within each CMD bin
delta_color = self.bkg_centers_color[1]-self.bkg_centers_color[0]
delta_mag = self.bkg_centers_mag[1]-self.bkg_centers_mag[0]
# Distribute points within each color-mag bins
xx,yy = np.meshgrid(self.bkg_centers_color,self.bkg_centers_mag)
color = np.repeat(xx.flatten(),repeats=nstar_per_bin.flatten())
color += np.random.uniform(-delta_color/2.,delta_color/2.,size=nstar)
mag_1 = np.repeat(yy.flatten(),repeats=nstar_per_bin.flatten())
mag_1 += np.random.uniform(-delta_mag/2.,delta_mag/2.,size=nstar)
else:
# Uniform color-magnitude distribution
logger.info("Generating uniform CMD.")
mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar)
color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar)
mag_2 = mag_1 - color
# Random points drawn from healpix subpixels
logger.info("Generating uniform positions...")
idx = np.random.randint(0,len(self.subpix)-1,size=nstar)
lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx])
nside_pixel = self.nside_pixel
pix = ang2pix(nside_pixel, lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(nside_pixel))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(nside_pixel))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
mc_source_id = mc_source_id * np.ones(len(mag_1))
# ADW: Should magnitudes be randomized by the errors?
#mag_1 += (np.random.normal(size=len(mag_1)) * mag_err_1)
#mag_2 += (np.random.normal(size=len(mag_2)) * mag_err_2)
select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2)
### # Make sure objects lie within the original cmd (should be done later...)
### select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, color, mag_1,
### self.roi.bins_color, self.roi.bins_mag) > 0)
logger.info("Clipping %i simulated background stars..."%(~select).sum())
hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select],
mag_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def satellite(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
"""
Create a simulated satellite. Returns a catalog object.
"""
if seed is not None: np.random.seed(seed)
isochrone = kwargs.pop('isochrone',self.isochrone)
kernel = kwargs.pop('kernel',self.kernel)
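# Override any matching kernel parameters (e.g. the extension) with keyword arguments.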
for k,v in kwargs.items():
if k in kernel.params.keys(): setattr(kernel,k,v)
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
logger.info("Simulating %i satellite stars..."%len(mag_1))
pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2)
# Randomize magnitudes by their errors
mag_obs_1 = mag_1+np.random.normal(size=len(mag_1))*mag_err_1
mag_obs_2 = mag_2+np.random.normal(size=len(mag_2))*mag_err_2
#mag_obs_1 = mag_1
#mag_obs_2 = mag_2
#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)
# Make sure objects lie within the original cmd (should also be done later...)
#select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, mag_obs_1 - mag_obs_2, mag_obs_1,self.roi.bins_color, self.roi.bins_mag) > 0)
#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
mc_source_id = mc_source_id * np.ones(len(mag_1))
hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
mag_obs_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def satellite2(self,stellar_mass,distance_modulus,mc_source_id=1,seed=None,**kwargs):
"""
Create a simulated satellite. Returns a catalog object.
"""
if seed is not None: np.random.seed(seed)
isochrone = kwargs.pop('isochrone',self.isochrone)
kernel = kwargs.pop('kernel',self.kernel)
for k,v in kwargs.items():
if k in kernel.params.keys(): setattr(kernel,k,v)
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
logger.info("Simulating %i satellite stars..."%len(mag_1))
pix = ang2pix(self.config['coords']['nside_pixel'], lon, lat)
# There is probably a better way to do this step without creating the full HEALPix map
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse
mag_lim_1 = mask[pix]
mask = -1. * np.ones(hp.nside2npix(self.config['coords']['nside_pixel']))
mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse
mag_lim_2 = mask[pix]
mag_err_1 = self.mask.photo_err_1(mag_lim_1 - mag_1)
mag_err_2 = self.mask.photo_err_2(mag_lim_2 - mag_2)
# Completeness is a function of true magnitude
method = 'step'
if method is None or method == 'none':
comp = np.ones(len(mag_1))
elif self.config['catalog']['band_1_detection']:
comp=self.mask.completeness(mag_lim_1-mag_1, method=method)
elif not self.config['catalog']['band_1_detection']:
comp=self.mask.completeness(mag_lim_2-mag_2, method=method)
else:
comp_1 = self.mask.completeness(mag_lim_1-mag_1, method=method)
comp_2 = self.mask.completeness(mag_lim_2-mag_2, method=method)
comp = comp_1*comp_2
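# Accept each star with probability equal to its completeness.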
accept = comp > 1 - np.random.uniform(size=len(mag_1))
# Randomize magnitudes by their errors
mag_obs_1 = mag_1 + (np.random.normal(size=len(mag_1))*mag_err_1)
mag_obs_2 = mag_2 + (np.random.normal(size=len(mag_2))*mag_err_2)
#select = np.logical_and(mag_obs_1 < mag_lim_1, mag_obs_2 < mag_lim_2)
select = (mag_lim_1>mag_obs_1)&(mag_lim_2>mag_obs_2)&accept
### # Make sure objects lie within the original cmd (should also be done later...)
### select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, color, mag_1,
### self.roi.bins_color, self.roi.bins_mag) > 0)
#return mag_1_obs[cut], mag_2_obs[cut], lon[cut], lat[cut]
logger.info("Clipping %i simulated satellite stars..."%(~select).sum())
mc_source_id = mc_source_id * np.ones(len(mag_1))
hdu = ugali.observation.catalog.makeHDU(self.config,mag_obs_1[select],mag_err_1[select],
mag_obs_2[select],mag_err_2[select],
lon[select],lat[select],mc_source_id[select])
catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data)
return catalog
def simulate(self, seed=None, **kwargs):
if seed is not None: np.random.seed(seed)
logger.info("Simulating object catalog...")
catalogs = []
#catalogs.append(self.toy_background(seed=seed))
catalogs.append(self.background(seed=seed))
catalogs.append(self.satellite(seed=seed,**kwargs))
logger.info("Merging simulated catalogs...")
catalog = ugali.observation.catalog.mergeCatalogs(catalogs)
nsig = (catalog.mc_source_id == 1).sum()
nbkg = (catalog.mc_source_id == 2).sum()
logger.info("Simulated catalog contains: %i background stars"%nbkg)
logger.info("Simulated catalog contains: %i satellite stars"%nsig)
return catalog
def makeHDU(self, mag_1, mag_err_1, mag_2, mag_err_2, lon, lat, mc_source_id):
"""
Create a catalog fits file object based on input data.
ADW: This should be combined with the write_membership
function of loglike.
"""
if self.config['catalog']['coordsys'].lower() == 'cel' \
and self.config['coords']['coordsys'].lower() == 'gal':
lon, lat = ugali.utils.projector.gal2cel(lon, lat)
elif self.config['catalog']['coordsys'].lower() == 'gal' \
and self.config['coords']['coordsys'].lower() == 'cel':
lon, lat = ugali.utils.projector.cel2gal(lon, lat)
columns = [
pyfits.Column(name=self.config['catalog']['objid_field'],
format = 'D',array = np.arange(len(lon))),
pyfits.Column(name=self.config['catalog']['lon_field'],
format = 'D',array = lon),
pyfits.Column(name = self.config['catalog']['lat_field'],
format = 'D',array = lat),
pyfits.Column(name = self.config['catalog']['mag_1_field'],
format = 'E',array = mag_1),
pyfits.Column(name = self.config['catalog']['mag_err_1_field'],
format = 'E',array = mag_err_1),
pyfits.Column(name = self.config['catalog']['mag_2_field'],
format = 'E',array = mag_2),
pyfits.Column(name = self.config['catalog']['mag_err_2_field'],
format = 'E',array = mag_err_2),
pyfits.Column(name = self.config['catalog']['mc_source_id_field'],
format = 'I',array = mc_source_id),
]
hdu = pyfits.new_table(columns)
return hdu
def write(self, outfile):
"""
"""
pass
############################################################
class Analyzer(object):
"""
Class for analyzing a set of simulated satellites by running the likelihood grid search on each one.
"""
def __init__(self, config, seed=None):
self.config = Config(config)
def create_population(self):
if self.config['simulate']['popfile']:
filename = os.path.join(self.config['simulate']['dirname'],self.config['simulate']['popfile'])
population = fitsio.read(filename)
else:
size = self.config['simulate']['size']
population = self.generate(size)
self.population = population
return self.population
def write(self, filename, data=None):
""" Write the output results """
if data is None: data = self.results
logger.info("Writing %s..."%filename)
if filename.endswith('.npy'):
np.save(filename,data)
elif filename.endswith('.fits'):
# Copies data, so be careful..
out = np.rec.array(data)
out.dtype.names = np.char.upper(out.dtype.names)
hdu = pyfits.new_table(out)
hdu.writeto(filename,clobber=True)
elif filename.endswith('.txt') or filename.endswith('.dat'):
np.savetxt(filename,data)
elif filename.endswith('.csv'):
np.savetxt(filename,data,delimiter=',')
else:
raise Exception('Unrecognized file extension: %s'%filename)
def run(self, catalog=None, outfile=None):
#if size is None: size = self.config['simulate']['size']
#data = self.generate(size)
data = self.create_population()
size = len(data)
dtype=[('kernel','S18'),('ts','>f4'),('fit_kernel','S18'),('fit_ts','>f4'),
('fit_mass','>f4'),('fit_mass_err','>f4'),
('fit_distance','>f4'),('fit_distance_err','>f4')]
results = np.array(np.nan*np.ones(size),dtype=dtype)
results = recfuncs.merge_arrays([data,results],flatten=True,asrecarray=False,usemask=False)
self.results = results
if outfile: self.write(outfile,results)
for i,d in enumerate(data):
params = dict(list(zip(data.dtype.names,d)))
lon,lat = params['ra'],params['dec']
distance_modulus = params['distance_modulus']
logger.info('\n(%i/%i); (lon, lat) = (%.2f, %.2f)'%(i+1,len(data),lon,lat))
roi = ugali.analysis.loglike.createROI(self.config,lon,lat)
mask = ugali.analysis.loglike.createMask(self.config,roi)
isochrone = ugali.analysis.loglike.createIsochrone(self.config)
kernel = ugali.analysis.loglike.createKernel(self.config,lon=lon,lat=lat)
pix = roi.indexTarget(lon,lat)
if not self.config['simulate']['catfile']:
simulator = Simulator(self.config,roi)
#catalog = simulator.simulate(seed=self.seed, **params)
catalog = simulator.simulate(**params)
#print "Catalog annulus contains:",roi.inAnnulus(simulator.catalog.lon,simulator.catalog.lat).sum()
else:
pass
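# Loading a pre-simulated catalog from the configured 'catfile' is not implemented yet.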
import pdb; pdb.set_trace()
logger.info("Simulated catalog annulus contains %i stars"%roi.inAnnulus(catalog.lon,catalog.lat).sum())
if len(catalog.lon) < 1000:
logger.error("Simulation contains too few objects; skipping...")
continue
"""
like = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, catalog, isochrone, kernel)
like.set_params(distance_modulus=params['distance_modulus'])
like.sync_params()
results[i]['ts'] = 2*like.fit_richness()[0]
print 'TS=',results[i]['ts']
like2 = ugali.analysis.loglike.LogLikelihood(self.config, roi, mask, simulator.catalog, isochrone, kernel)
like2.set_params(distance_modulus=params['distance_modulus'])
like2.sync_params()
print 'TS=',2*like2.fit_richness()[0]
"""
#return simulator,like,like2
# Index of closest distance modulus
grid = ugali.analysis.scan.GridSearch(self.config,roi,mask,catalog,isochrone,kernel)
self.catalog = catalog
self.simulator = simulator
self.grid = grid
self.loglike = self.grid.loglike
# ADW: Should allow fit_distance to float in order to model search procedure
#fit_distance = float(distance_modulus)
distance_idx = np.fabs(grid.distance_modulus_array-params['distance_modulus']).argmin()
fit_distance = grid.distance_modulus_array[distance_idx]
grid.search(coords=(lon,lat),distance_modulus=fit_distance)
logger.info(str(self.loglike))
mle = grid.mle()
results[i]['kernel'] = simulator.kernel.name
results[i]['fit_kernel'] = grid.loglike.kernel.name
results[i]['ts'] = 2*grid.log_likelihood_sparse_array[distance_idx][pix]
results[i]['fit_ts'] = 2*np.max(grid.log_likelihood_sparse_array[:,pix])
results[i]['fit_mass'] = grid.stellar_mass_conversion*mle['richness']
results[i]['fit_distance'] = fit_distance #mle['distance_modulus']
err = grid.err()
richness_err = (err['richness'][1]-err['richness'][0])/2.
results[i]['fit_mass_err'] = grid.stellar_mass_conversion*richness_err
distance_modulus_err = (err['distance_modulus'][1]-err['distance_modulus'][0])/2.
results[i]['fit_distance_err'] = distance_modulus_err
for d in dtype:
logger.info('\t%s: %s'%(d[0], results[i][d[0]]))
if i%self.config['simulate']['save']==0 and outfile:
self.write(outfile,results)
if outfile: self.write(outfile,results)
return results
############################################################
def satellite(isochrone, kernel, stellar_mass, distance_modulus,**kwargs):
"""
Wrapping the isochrone and kernel simulate functions.
"""
mag_1, mag_2 = isochrone.simulate(stellar_mass, distance_modulus)
lon, lat = kernel.simulate(len(mag_1))
return mag_1, mag_2, lon, lat
############################################################
if __name__ == "__main__":
import ugali.utils.parser
description = "Script for executing the likelihood scan."
parser = ugali.utils.parser.Parser(description=description)
parser.add_config()
parser.add_argument('outfile',metavar='outfile.fits',help='Output fits file.')
parser.add_debug()
parser.add_verbose()
parser.add_seed()
opts = parser.parse_args()
config = Config(opts.config)
    analyzer = Analyzer(config,opts.seed)
    results = analyzer.run(outfile=opts.outfile)
|
|
import os.path
# Modified by Francois Malan, LUMC / TU Delft
# December 2009
#
# based on the SkeletonAUIViewer:
# skeleton of an AUI-based viewer module
# Copyright (c) Charl P. Botha, TU Delft.
# set to False for 3D viewer, True for 2D image viewer
IMAGE_VIEWER = False
# import the frame, i.e. the wx window containing everything
import MaskComBinarFrame
# and do a reload, so that the GUI is also updated at reloads of this
# module.
reload(MaskComBinarFrame)
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin
import module_utils
import os
import vtk
import itk
import wx
import copy
import subprocess
#import numpy as np
from OverlaySliceViewer import OverlaySliceViewer
class Mask(object):
def __init__(self, name, file_path, image_data):
self.name = name
self.file_path = file_path
self.data = image_data
# def deepcopy(self):
# return Mask(self.name, self.file_path, self.data.DeepCopy())
class MaskComBinar(IntrospectModuleMixin, ModuleBase):
def __init__(self, module_manager):
"""Standard constructor. All DeVIDE modules have these, we do
the required setup actions.
"""
# we record the setting here, in case the user changes it
# during the lifetime of this model, leading to different
# states at init and shutdown.
self.IMAGE_VIEWER = IMAGE_VIEWER
ModuleBase.__init__(self, module_manager)
# create the view frame
self._view_frame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
MaskComBinarFrame.MaskComBinarFrame)
# change the title to something more spectacular
self._view_frame.SetTitle('MaskComBinar - a tool for measuring and manipulating binary masks')
#initialise data structures
self._init_data_structures()
self._init_2d_render_window()
self._init_3d_render_window()
self.reset_camera_on_mask_display = True
self.first_save_warning = True
# hook up all event handlers
self._bind_events()
# anything you stuff into self._config will be saved
self._config.last_used_dir = ''
# make our window appear (this is a viewer after all)
self.view()
# all modules should toggle this once they have shown their
# views.
self.view_initialised = True
# apply config information to underlying logic
self.sync_module_logic_with_config()
# then bring it all the way up again to the view
self.sync_module_view_with_logic()
#This tool can be used for introspection of wx components
#
def _init_2d_render_window(self):
#create the necessary VTK objects for the 2D window. We use Charl's CMSliceViewer
#which defines all the nice goodies we'll need
self.ren2d = vtk.vtkRenderer()
self.ren2d.SetBackground(0.4,0.4,0.4)
self.slice_viewer = OverlaySliceViewer(self._view_frame.rwi2d, self.ren2d)
self._view_frame.rwi2d.GetRenderWindow().AddRenderer(self.ren2d)
self.slice_viewer.add_overlay('a', [0, 0, 1, 1]) #Blue for selection A
self.slice_viewer.add_overlay('b', [1, 0, 0, 1]) #Red for selection B
        self.slice_viewer.add_overlay('intersect', [1, 1, 0, 1]) #Yellow for intersection
def _init_3d_render_window(self):
# create the necessary VTK objects for the 3D window: we only need a renderer,
# the RenderWindowInteractor in the view_frame has the rest.
self.ren3d = vtk.vtkRenderer()
self.ren3d.SetBackground(0.6,0.6,0.6)
self._view_frame.rwi3d.GetRenderWindow().AddRenderer(self.ren3d)
def _init_data_structures(self):
self.opacity_3d = 0.5
self.rgb_blue = [0,0,1]
self.rgb_red = [1,0,0]
self.rgb_yellow = [1,1,0]
self.masks = {}
self.surfaces = {} #This prevents recomputing surface meshes
self.actors3d = {}
self.rendered_masks_in_a = set()
self.rendered_masks_in_b = set()
self.rendered_overlap = False
def _load_mask_from_file(self, file_path):
print "Opening file: %s" % (file_path)
filename = os.path.split(file_path)[1]
reader = None
extension = os.path.splitext(filename)[1]
if extension == '.vti': # VTI
reader = vtk.vtkXMLImageDataReader()
elif extension == '.mha': # MHA
reader = vtk.vtkMetaImageReader()
else:
self._view_frame.dialog_error('Unknown file extension: %s' % extension, 'Unable to handle extension')
return
reader.SetFileName(file_path)
reader.Update()
result = vtk.vtkImageData()
result.DeepCopy(reader.GetOutput())
return result
def load_binary_mask_from_file(self, file_path):
mask_image_data = self._load_mask_from_file(file_path)
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
mask = Mask(fileBaseName, file_path, mask_image_data)
self.add_mask(mask)
def load_multi_mask_from_file(self, file_path):
mask_image_data = self._load_mask_from_file(file_path)
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
#Now we have to create a separate mask for each integer level.
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(mask_image_data)
accumulator.Update()
max_label = int(accumulator.GetMax()[0])
#We assume all labels to have positive values.
for i in range(1,max_label+1):
label_data = self._threshold_image(mask_image_data, i, i)
new_name = '%s_%d' % (fileBaseName, i)
mask = Mask(new_name, file_path, label_data)
self.add_mask(mask)
def save_mask_to_file(self, mask_name, file_path):
if os.path.exists(file_path):
result = self._view_frame.dialog_yesno("%s already exists! \nOverwrite?" % file_path,"File already exists")
if result == False:
print 'Skipped writing %s' % file_path
return #skip this file if overwrite is denied
mask = self.masks[mask_name]
mask.file_path = file_path
self._save_image_to_file(mask.data, file_path)
print 'Wrote mask %s to %s' % (mask_name, file_path)
def _save_image_to_file(self, imagedata, file_path):
filename = os.path.split(file_path)[1]
extension = os.path.splitext(filename)[1]
writer = None
if extension == '.vti': # VTI
writer = vtk.vtkXMLImageDataWriter()
elif extension == '.mha': # MHA
print 'Attempting to create an mha writer. This has failed in the past (?)'
writer = vtk.vtkMetaImageWriter()
writer.SetCompression(True)
else:
self._view_frame.dialog_error('Unknown file extension: %s' % extension, 'Unable to handle extension')
return
writer.SetInput(imagedata)
writer.SetFileName(file_path)
writer.Update()
result = writer.Write()
if result == 0:
self._view_frame.dialog_error('Error writing %s' % filename, 'Error writing file')
print 'ERROR WRITING FILE!!!'
else:
self._view_frame.dialog_info('Successfully wrote %s' % filename, 'Success')
print 'Successfully wrote %s' % file_path
def add_mask(self, mask):
[accept, name] = self._view_frame.dialog_inputtext('Please choose a name for the new mask','Choose a name', mask.name)
if accept:
mask.name = name
if self.masks.has_key(name):
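                # A mask with this name already exists; append an incrementing numeric suffix until the name is unique.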
i=1
new_name = '%s%d' % (name, i)
while self.masks.has_key(new_name):
i += 1
new_name = '%s%d' % (mask.name, i)
mask.name = new_name
self.masks[mask.name] = mask
self._view_frame.add_mask(mask.name)
def delete_masks(self, mask_names):
temp = mask_names.copy()
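        # Build a comma-separated display string by popping names; the set is restored from the copy afterwards.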
if len(mask_names) > 0:
mask_names_str = mask_names.pop()
while len(mask_names) > 0:
mask_names_str = mask_names_str + ',%s' % mask_names.pop()
mask_names = temp
if self._view_frame.dialog_yesno('Are you sure you want to delete the following masks: %s' % mask_names_str, 'Delete masks?'):
for mask_name in mask_names:
print 'deleting mask: %s' % mask_name
if self.masks.has_key(mask_name):
self.masks.pop(mask_name)
self._view_frame.delete_mask(mask_name)
else:
self._view_frame.dialog_error('Mask "%s" not found in internal mask list!' % mask_name, 'Mask not found')
if len(self.masks) == 0: #If there are no masks left we disable the 2D viewer's pickable plane
self.slice_viewer.set_input(0, None)
def close(self):
"""Clean-up method called on all DeVIDE modules when they are
deleted.
"""
# with this complicated de-init, we make sure that VTK is
# properly taken care of
self.ren2d.RemoveAllViewProps()
self.ren3d.RemoveAllViewProps()
# this finalize makes sure we don't get any strange X
# errors when we kill the module.
self._view_frame.rwi2d.GetRenderWindow().Finalize()
self._view_frame.rwi2d.SetRenderWindow(None)
del self._view_frame.rwi2d
self._view_frame.rwi3d.GetRenderWindow().Finalize()
self._view_frame.rwi3d.SetRenderWindow(None)
del self._view_frame.rwi3d
# done with VTK de-init
# now take care of the wx window
self._view_frame.close()
# then shutdown our introspection mixin
IntrospectModuleMixin.close(self)
def get_input_descriptions(self):
# define this as a tuple of input descriptions if you want to
# take input data e.g. return ('vtkPolyData', 'my kind of
# data')
return ()
def get_output_descriptions(self):
# define this as a tuple of output descriptions if you want to
# generate output data.
return ()
def set_input(self, idx, input_stream):
# this gets called right before you get executed. take the
# input_stream and store it so that it's available during
# execute_module()
pass
def get_output(self, idx):
# this can get called at any time when a consumer module wants
# your output data.
pass
def execute_module(self):
# when it's your turn to execute as part of a network
# execution, this gets called.
pass
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def config_to_view(self):
pass
def view_to_config(self):
pass
def view(self):
self._view_frame.Show()
self._view_frame.Raise()
# because we have an RWI involved, we have to do this
# SafeYield, so that the window does actually appear before we
# call the render. If we don't do this, we get an initial
# empty renderwindow.
wx.SafeYield()
self.render()
def _update_3d_masks(self, id, removed, added):
rgb_colour = [0,0,0]
if id == 'a':
rgb_colour = self.rgb_blue
elif id == 'b':
rgb_colour = self.rgb_red
for name in removed:
key = id + name
self.ren3d.RemoveActor(self.actors3d[key])
self.render()
for name in added:
self._render_3d_mask(id, name, rgb_colour, self.opacity_3d)
def _update_3d_masks_overlapping(self, mask_a, mask_b, mask_intersect):
self._clear_3d_window()
self._render_3d_data('a_not_b', mask_a.data, self.rgb_blue, self.opacity_3d)
self._render_3d_data('b_not_a', mask_b.data, self.rgb_red, self.opacity_3d)
self._render_3d_data('a_and_b', mask_intersect.data, self.rgb_yellow, self.opacity_3d)
def _clear_3d_window(self):
for actor in self.actors3d.values():
self.ren3d.RemoveActor(actor)
self.ren3d.Clear()
self.rendered_masks_in_a = set()
self.rendered_masks_in_b = set()
self.rendered_overlap = False
def _render_2d_mask(self, id, mask):
mask_data = None
if mask != None:
mask_data = mask.data
self.slice_viewer.set_input(id, mask_data)
if self.reset_camera_on_mask_display:
self.slice_viewer.reset_camera()
#self.slice_viewer.reset_to_default_view(2)
self.slice_viewer.render()
def _render_3d_mask(self, id, name, rgb_colour, opacity):
"""Add the given mask to the 3D display window.
An iso-surface of colour rgb_colour is rendered at value = 1.
"""
surface = None
mask = self.masks[name]
if not self.surfaces.has_key(name):
surface_creator = vtk.vtkDiscreteMarchingCubes()
surface_creator.SetInput(mask.data)
surface_creator.Update()
surface = surface_creator.GetOutput()
self.surfaces[name] = surface
else:
surface = self.surfaces[name]
m = vtk.vtkPolyDataMapper()
m.SetInput(surface)
m.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(m)
actor.SetPosition(mask.data.GetOrigin())
actor.GetProperty().SetColor(rgb_colour)
actor.GetProperty().SetOpacity(opacity)
#actor.GetProperty().SetInterpolationToFlat()
self.ren3d.AddActor(actor)
self.actors3d[id+name] = actor
if self.reset_camera_on_mask_display:
self.ren3d.ResetCamera()
self.render()
def _render_3d_data(self, id, data, rgb_colour, opacity):
"""Add the given mask to the 3D display window.
An iso-surface of colour rgb_colour is rendered at value = 1.
"""
surface_creator = vtk.vtkDiscreteMarchingCubes()
surface_creator.SetInput(data)
surface_creator.Update()
surface = surface_creator.GetOutput()
m = vtk.vtkPolyDataMapper()
m.SetInput(surface)
m.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(m)
actor.SetPosition(data.GetOrigin())
actor.GetProperty().SetColor(rgb_colour)
actor.GetProperty().SetOpacity(opacity)
#actor.GetProperty().SetInterpolationToFlat()
self.ren3d.AddActor(actor)
self.actors3d[id] = actor
if self.reset_camera_on_mask_display:
self.ren3d.ResetCamera()
self.render()
def _bind_events(self):
"""Bind wx events to Python callable object event handlers.
"""
vf = self._view_frame
vf.Bind(wx.EVT_MENU, self._handler_open_binary_mask,
id = vf.id_open_binary_mask)
vf.Bind(wx.EVT_MENU, self._handler_open_multi_mask,
id = vf.id_open_multi_mask)
vf.Bind(wx.EVT_MENU, self._handler_save_multi_mask,
id = vf.id_save_multi_mask)
vf.Bind(wx.EVT_MENU, self._handler_open_mask_dir,
id = vf.id_open_mask_dir)
vf.Bind(wx.EVT_MENU, self._handler_save_mask,
id = vf.id_save_mask)
vf.Bind(wx.EVT_MENU, self._handler_close,
id = vf.id_quit)
vf.Bind(wx.EVT_MENU, self._handler_introspect,
id = vf.id_introspect)
vf.Bind(wx.EVT_MENU, self._handler_about,
id = vf.id_about)
self._view_frame.reset_cam2d_button.Bind(wx.EVT_BUTTON,
self._handler_reset_cam2d_button)
self._view_frame.reset_cam3d_button.Bind(wx.EVT_BUTTON,
self._handler_reset_cam3d_button)
self._view_frame.clear_selection_button.Bind(wx.EVT_BUTTON,
self._handler_clear_selection_button)
self._view_frame.list_ctrl_maskA.Bind(wx.EVT_LIST_ITEM_SELECTED, self._handler_listctrl)
self._view_frame.list_ctrl_maskA.Bind(wx.EVT_LIST_ITEM_DESELECTED, self._handler_listctrl)
self._view_frame.list_ctrl_maskB.Bind(wx.EVT_LIST_ITEM_SELECTED, self._handler_listctrl)
self._view_frame.list_ctrl_maskB.Bind(wx.EVT_LIST_ITEM_DESELECTED, self._handler_listctrl)
self._view_frame.list_ctrl_maskA.Bind(wx.EVT_LIST_KEY_DOWN, self._handler_delete_mask_a)
self._view_frame.list_ctrl_maskB.Bind(wx.EVT_LIST_KEY_DOWN, self._handler_delete_mask_b)
#Mask operations
self._view_frame.mask_join_button.Bind(wx.EVT_BUTTON, self._handler_mask_join)
self._view_frame.mask_subtract_button.Bind(wx.EVT_BUTTON, self._handler_mask_subtract)
self._view_frame.mask_intersect_button.Bind(wx.EVT_BUTTON, self._handler_mask_intersect)
self._view_frame.mask_align_metadata_button.Bind(wx.EVT_BUTTON, self._handler_align_masks_metadata)
self._view_frame.mask_align_icp_button.Bind(wx.EVT_BUTTON, self._handler_align_masks_icp)
self._view_frame.split_disconnected_button.Bind(wx.EVT_BUTTON, self._handler_split_disconnected)
#Mask diagnostics
self._view_frame.test_all_dimensions_button.Bind(wx.EVT_BUTTON, self._handler_test_all_dimensions)
self._view_frame.test_selected_dimensions_button.Bind(wx.EVT_BUTTON, self._handler_test_selected_dimensions)
self._view_frame.test_all_intersections_button.Bind(wx.EVT_BUTTON, self._handler_test_all_intersections)
self._view_frame.test_selected_intersections_button.Bind(wx.EVT_BUTTON, self._handler_test_selected_intersections)
#Mask metrics
self._view_frame.volume_button.Bind(wx.EVT_BUTTON, self._handler_compute_volume)
self._view_frame.dice_coefficient_button.Bind(wx.EVT_BUTTON, self._handler_compute_dice_coefficient)
self._view_frame.hausdorff_distance_button.Bind(wx.EVT_BUTTON, self._handler_compute_hausdorff_distance)
self._view_frame.mean_hausdorff_distance_button.Bind(wx.EVT_BUTTON, self._handler_compute_mean_hausdorff_distance)
#self._view_frame.Bind(wx.EVT_SLIDER, self._handler_slider_update)
def _handler_reset_cam2d_button(self, event):
#self.slice_viewer.reset_camera()
self.slice_viewer.reset_to_default_view(2)
self.render()
def _handler_reset_cam3d_button(self, event):
self.ren3d.ResetCamera()
self.render()
def _handler_clear_selection_button(self, event):
self._view_frame.clear_selections()
self._clear_3d_window()
self.slice_viewer.set_input(0, None)
self.slice_viewer.set_input('a', None)
self.slice_viewer.set_input('b', None)
self.slice_viewer.set_input('intersect', None)
self.render()
def _handler_delete_mask_a(self, event):
        '''Handler for deleting a mask from either of the two lists (acts on both)'''
if event.KeyCode == 127: #This is the keycode for "delete"
names_a = self._view_frame.get_selected_mask_names_a()
if len(names_a) > 0:
self.delete_masks(names_a)
def _handler_delete_mask_b(self, event):
        '''Handler for deleting a mask from either of the two lists (acts on both)'''
if event.KeyCode == 127: #This is the keycode for "delete"
names_b = self._view_frame.get_selected_mask_names_b()
if len(names_b) > 0:
self.delete_masks(names_b)
def _handler_listctrl(self, event):
"""Mask is selected or deselected in listcontrol A"""
if self.rendered_overlap:
self._clear_3d_window()
self.rendered_overlap = False
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
new_in_a = set()
new_in_b = set()
gone_from_a = set()
gone_from_b = set()
#Check what has changed
for name in names_a:
if not name in self.rendered_masks_in_a:
new_in_a.add(name)
for name in self.rendered_masks_in_a:
if not name in names_a:
gone_from_a.add(name)
#Update the list of selected items
self.rendered_masks_in_a = names_a
for name in names_b:
if not name in self.rendered_masks_in_b:
new_in_b.add(name)
for name in self.rendered_masks_in_b:
if not name in names_b:
gone_from_b.add(name)
#Update the list of selected items
self.rendered_masks_in_b = names_b
overlap = None
union_masks_a = None
union_masks_b = None
if (len(gone_from_a) > 0) or (len(new_in_a) > 0) or (len(gone_from_b) > 0) or (len(new_in_b) > 0):
union_masks_a = self.compute_mask_union(names_a)
union_masks_b = self.compute_mask_union(names_b)
self._render_2d_mask('a',union_masks_a)
self._render_2d_mask('b',union_masks_b)
overlap = self._logical_intersect_masks(union_masks_a, union_masks_b)
if self._is_empty_mask(overlap):
overlap = None
self._render_2d_mask('intersect',overlap)
if overlap == None:
#We don't need to render any custom mask - only a list of existing selected masks
self._update_3d_masks('a', gone_from_a, new_in_a)
self._update_3d_masks('b', gone_from_b, new_in_b)
else:
#We require a more expensive custom render to show overlapping areas in 3D
a_not_b = self._logical_subtract_masks(union_masks_a, overlap)
b_not_a = self._logical_subtract_masks(union_masks_b, overlap)
self._update_3d_masks_overlapping(a_not_b, b_not_a, overlap)
            self.rendered_masks_in_a = set()
            self.rendered_masks_in_b = set()
self.rendered_overlap = True
def _handler_open_binary_mask(self, event):
"""Opens a binary mask file"""
filters = 'Mask files (*.vti;*.mha)|*.vti;*.mha'
dlg = wx.FileDialog(self._view_frame, "Choose a binary mask file", self._config.last_used_dir, "", filters, wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
full_file_path = "%s\\%s" % (self._config.last_used_dir, filename)
self.load_binary_mask_from_file(full_file_path)
dlg.Destroy()
def _handler_open_multi_mask(self, event):
"""Opens an integer-labeled multi-material mask file"""
filters = 'Mask files (*.vti;*.mha)|*.vti;*.mha'
dlg = wx.FileDialog(self._view_frame, "Choose a multi-label mask file", self._config.last_used_dir, "", filters, wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
full_file_path = "%s\\%s" % (self._config.last_used_dir, filename)
self.load_multi_mask_from_file(full_file_path)
dlg.Destroy()
def _handler_open_mask_dir(self, event):
"""Opens all masks in a given directory"""
dlg = wx.DirDialog(self._view_frame, "Choose a directory containing masks", self._config.last_used_dir)
if dlg.ShowModal() == wx.ID_OK:
dir_name=dlg.GetPath()
self._config.last_used_dir=dir_name
all_files = os.listdir(dir_name)
#First we set up actor list of files with the correct extension
file_list = []
source_ext = '.vti'
for f in all_files:
file_name = os.path.splitext(f)
if file_name[1] == source_ext:
file_list.append(f)
for filename in file_list:
full_file_path = "%s\\%s" % (dir_name, filename)
self.load_binary_mask_from_file(full_file_path)
dlg.Destroy()
print 'Done!'
def _specify_output_file_path(self):
file_path = None
filters = 'Mask files (*.vti;*.mha)|*.vti;*.mha'
dlg = wx.FileDialog(self._view_frame, "Choose a destination", self._config.last_used_dir, "", filters, wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
file_path = "%s\\%s" % (self._config.last_used_dir, filename)
dlg.Destroy()
return file_path
def _handler_save_multi_mask(self, event):
"""Saves a multi-label mask file"""
if self.test_valid_mask_selection_multiple():
file_path = self._specify_output_file_path()
if file_path != None:
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
names = set()
for mask_name in names_a:
names.add(mask_name)
for mask_name in names_b:
names.add(mask_name)
mask_name = names.pop()
imagedata = vtk.vtkImageData()
maskdata = self.masks[mask_name].data
imagedata.DeepCopy(maskdata)
k = 1
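                # Each remaining mask is multiplied by its own integer label k and added in, building a single multi-label volume.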
for mask_name in names:
k = k+1
maskdata = self.masks[mask_name].data
imath = vtk.vtkImageMathematics()
imath.SetOperationToMultiplyByK()
imath.SetConstantK(k)
print 'Multiplying %s with %d and adding to volume' % (mask_name, k)
imath.SetInput(maskdata)
imath.Update()
adder = vtk.vtkImageMathematics()
adder.SetOperationToAdd()
adder.SetInput1(imagedata)
adder.SetInput2(imath.GetOutput())
adder.Update()
imagedata.DeepCopy(adder.GetOutput())
self._save_image_to_file(imagedata, file_path)
print 'Wrote multi-label mask with %d labels to %s' % (k, file_path)
def _handler_save_mask(self, event):
"""Saves a mask file"""
if self.test_single_mask_selection():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_name = ''
if len(names_b) == 1:
mask_name = names_b.pop()
else:
mask_name = names_a.pop()
file_path = self._specify_output_file_path()
            if file_path != None:
self.save_mask_to_file(mask_name, file_path)
else:
self._view_frame.dialog_exclaim("No valid file name specified")
def _handler_align_masks_metadata(self, event):
"""Aligns two masks by copying metadata from the first to the second (origin, spacing, extent, wholeextent)
As always, creates a new mask in the list of masks as output.
"""
if self.test_single_mask_pair_selection():
            #We know that there is only a single mask selected in each of A and B, therefore we simply pop the single element from each
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
maskA = self.masks[names_a.pop()]
maskB = self.masks[names_b.pop()]
mask_data = vtk.vtkImageData()
mask_data.DeepCopy(maskB.data)
mask_data.SetOrigin(maskA.data.GetOrigin())
mask_data.SetExtent(maskA.data.GetExtent())
mask_data.SetWholeExtent(maskA.data.GetWholeExtent())
mask_data.SetSpacing(maskA.data.GetSpacing())
mask = Mask('%s_a' % maskB.name, maskB.file_path, mask_data)
self.add_mask(mask)
def _handler_split_disconnected(self, event):
"""Splits the selected mask into disconnected regions"""
if self.test_single_mask_selection():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_name = ''
if len(names_b) == 1:
mask_name = names_b.pop()
else:
mask_name = names_a.pop()
self._split_disconnected_objects(mask_name)
def _handler_align_masks_icp(self, event):
"""Aligns two masks by using the Iterative Closest Point algorithm (rigid transformation)
As always, creates a new mask in the list of masks as output.
"""
if self.test_single_mask_pair_selection():
            #We know that there is only a single mask selected in each of A and B, therefore we simply pop the single element from each
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
maskA = self.masks[names_a.pop()]
maskB = self.masks[names_b.pop()]
#We need meshes (polydata) as input to the ICP algorithm
meshA = None
meshB = None
            #actually this should never happen, but keep the check to be doubly sure
if not self.surfaces.has_key(maskA.name):
surface_creator_A = vtk.vtkDiscreteMarchingCubes()
surface_creator_A.SetInput(maskA.data)
surface_creator_A.Update()
meshA = surface_creator_A.GetOutput()
else:
meshA = self.surfaces[maskA.name]
            #actually this should never happen, but keep the check to be doubly sure
if not self.surfaces.has_key(maskB.name):
surface_creator_B = vtk.vtkDiscreteMarchingCubes()
surface_creator_B.SetInput(maskB.data)
surface_creator_B.Update()
meshB = surface_creator_B.GetOutput()
else:
meshB = self.surfaces[maskB.name]
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetMaximumNumberOfIterations(50)
icp.SetSource(meshA)
icp.SetTarget(meshB)
            print 'Executing ICP algorithm'
icp.Update()
del meshA, meshB
reslicer = vtk.vtkImageReslice()
reslicer.SetInterpolationModeToNearestNeighbor()
#reslicer.SetInterpolationModeToCubic()
reslicer.SetInput(maskB.data)
reslicer.SetResliceTransform(icp)
reslicer.Update()
del maskA, maskB
result = vtk.vtkImageData()
result.DeepCopy(reslicer.GetOutput())
self.add_mask(Mask('Aligned','',result))
def _handler_compute_volume(self, event):
"""Computes the volume of of mask A (in milliliters)"""
if self.test_valid_mask_selection_a():
names_a = self._view_frame.get_selected_mask_names_a()
union_masksA = self.compute_mask_union(names_a)
spacing = union_masksA.data.GetSpacing()
voxel_volume = spacing[0] * spacing[1] * spacing[2]
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(union_masksA.data)
accumulator.Update()
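            # For a binary mask, mean * voxel count gives the number of nonzero voxels.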
nonzero_count = accumulator.GetMean()[0] * accumulator.GetVoxelCount()
volume = voxel_volume * nonzero_count / 1000.0
print "Volume = %.2f ml" % (volume)
            copy_to_clipboard = self._view_frame.dialog_yesno('Volume = %f ml\n\nCopy to clipboard?' % volume, 'Volume = %.1f ml' % (volume))
if copy_to_clipboard:
self._view_frame.copy_text_to_clipboard('%f' % volume)
def _is_empty_mask(self, mask):
if mask == None:
return True
else:
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(mask.data)
accumulator.Update()
return accumulator.GetMax()[0] == 0
def _handler_compute_dice_coefficient(self, event):
"""Computes the Dice coefficient between selections in A and B
Implementation from Charl's coderunner code"""
if self.test_valid_mask_selection_a_and_b():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
union_masksA = self.compute_mask_union(names_a)
union_masksB = self.compute_mask_union(names_b)
# Given two binary volumes, this CodeRunner will implement
# the percentage volume overlap. This is useful for
# doing validation with ground truth / golden standard /
# manually segmented volumes. This is also called the Dice
# coefficient and ranges from 0.0 to 1.0.
# interesting paper w.r.t. segmentation validation:
# Valmet: A new validation tool for assessing and improving 3D object segmentation
# basic idea:
# threshold data (so we have >0 == 1 and everything else 0)
# then histogram into two bins.
threshes = []
for _ in range(2):
t = vtk.vtkImageThreshold()
threshes.append(t)
# anything equal to or lower than 0.0 will be "In"
t.ThresholdByLower(0.0)
# <= 0 -> 0
t.SetInValue(0)
# > 0 -> 1
t.SetOutValue(1)
t.SetOutputScalarTypeToUnsignedChar()
# have to stuff all components into one image
iac = vtk.vtkImageAppendComponents()
iac.SetInput(0, threshes[0].GetOutput())
iac.SetInput(1, threshes[1].GetOutput())
# generate 2 by 2 matrix (histogram)
ia = vtk.vtkImageAccumulate()
ia.SetInput(iac.GetOutput())
ia.SetComponentExtent(0,1, 0,1, 0,0)
threshes[0].SetInput(union_masksA.data)
threshes[1].SetInput(union_masksB.data)
ia.Update()
iasc = ia.GetOutput().GetPointData().GetScalars()
cells = [0] * 4
for i in range(4):
cells[i] = iasc.GetTuple1(i)
# tuple 0: not in actor, not in b
# tuple 1: in actor, not in b
# tuple 2: in b, not in actor
# tuple 3: in actor, in b
            # Dice coefficient: 2*|a intersect b| / (|a| + |b|)
            dice_coeff = (2 * cells[3] / (2* cells[3] + cells[1] + cells[2]))
            print "Dice Coefficient = %.2f" % (dice_coeff)
copy_to_clipboard = self._view_frame.dialog_yesno('Dice coefficient = %f\n\nCopy to clipboard?' % dice_coeff, '%.1f%% overlap' % (100*dice_coeff))
if copy_to_clipboard:
self._view_frame.copy_text_to_clipboard('%f' % dice_coeff)
def _compute_hausdorff_distances(self, maskA, maskB):
"""
Computes the Hausdorff Distance between selections in A and B.
Uses the external software tool Metro to do point-based mesh sampling
"""
#We need meshes (polydata) for computing the Hausdorff distances
meshA = None
meshB = None
        #actually this should never happen, but keep the check to be doubly sure
if not self.surfaces.has_key(maskA.name):
self._view_frame.dialog_exclaim('Mesh belonging to Mask A not found in list, and created on the fly. This is unexpected...', 'Unexpected program state')
surface_creator_A = vtk.vtkDiscreteMarchingCubes()
surface_creator_A.SetInput(maskA.data)
surface_creator_A.Update()
meshA = surface_creator_A.GetOutput()
else:
meshA = self.surfaces[maskA.name]
        #actually this should never happen, but keep the check to be doubly sure
if not self.surfaces.has_key(maskB.name):
self._view_frame.dialog_exclaim('Mesh belonging to Mask B not found in list, and created on the fly. This is unexpected...', 'Unexpected program state')
surface_creator_B = vtk.vtkDiscreteMarchingCubes()
surface_creator_B.SetInput(maskB.data)
surface_creator_B.Update()
meshB = surface_creator_B.GetOutput()
else:
meshB = self.surfaces[maskB.name]
filename_a = '@temp_mesh_a.ply'
filename_b = '@temp_mesh_b.ply'
ply_writer = vtk.vtkPLYWriter()
ply_writer.SetFileTypeToBinary()
print 'Writing temporary PLY mesh A = %s' % filename_a
ply_writer.SetFileName(filename_a)
ply_writer.SetInput(meshA)
ply_writer.Update()
print 'Writing temporary PLY mesh B = %s' % filename_b
ply_writer.SetFileName(filename_b)
ply_writer.SetInput(meshB)
ply_writer.Update()
command = 'metro.exe %s %s' % (filename_a, filename_b)
p = subprocess.Popen(command, shell=True, stdout = subprocess.PIPE)
outp = p.stdout.read() #The command line output from metro
if len(outp) < 50:
self._view_frame.dialog_error('Hausdorff distance computation requires Metro to be installed and available in the system path.\n\nMetro failed to execute.\n\nAborting.\n\nMetro may be downloaded from http://vcg.sourceforge.net/index.php/Metro', 'Metro was not found')
return
print 'Executing: %s' % command
print '....................................'
print outp
print '....................................'
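        # Parse the forward and backward (mean) Hausdorff distances from Metro's textual output.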
index = outp.find('max')
hdf = float(outp[index+6:index+54].split()[0]) #Forward Hausdorff distance
index = outp.find('max', index+3)
hdb = float(outp[index+6:index+54].split()[0]) #Backward Hausdorff distance
index = outp.find('mean')
mhdf = float(outp[index+7:index+35].split()[0]) #Forward Mean Hausdorff distance
index = outp.find('mean', index+4)
mhdb = float(outp[index+7:index+35].split()[0]) #Backward Mean Hausdorff distance
hausdorff_distance = max(hdf, hdb)
mean_hausdorff_distance = 0.5 * (mhdf + mhdb)
print 'removing temporary files'
os.remove(filename_a)
os.remove(filename_b)
print 'done!'
print '\nSampled Hausdorff distance = %.4f\nSampled Mean Hausdorff distance = %.4f\n' % (hausdorff_distance, mean_hausdorff_distance)
return [hausdorff_distance, mean_hausdorff_distance]
def _handler_compute_hausdorff_distance(self, event):
"""
Computes the Hausdorff Distance between meshes in A and B.
Uses the external software tool Metro to do point-based mesh sampling
"""
if self.test_single_mask_pair_selection():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
maskA = self.masks[names_a.pop()]
maskB = self.masks[names_b.pop()]
[hausdorff_distance, _] = self._compute_hausdorff_distances(maskA, maskB)
copy_to_clipboard = self._view_frame.dialog_yesno('Hausdorff distance = %.4f mm\n\nCopy to clipboard?' % hausdorff_distance, 'Hausdorff Distance')
if copy_to_clipboard:
self._view_frame.copy_text_to_clipboard('%f' % hausdorff_distance)
def _handler_compute_mean_hausdorff_distance(self, event):
"""
Computes the Mean Hausdorff Distance between meshes in A and B.
Uses the external software tool Metro to do point-based mesh sampling
"""
if self.test_single_mask_pair_selection():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
maskA = self.masks[names_a.pop()]
maskB = self.masks[names_b.pop()]
[_, mean_hausdorff_distance] = self._compute_hausdorff_distances(maskA, maskB)
copy_to_clipboard = self._view_frame.dialog_yesno('Mean Hausdorff distance = %.4f mm\n\nCopy to clipboard?' % mean_hausdorff_distance, 'Mean Hausdorff distance')
if copy_to_clipboard:
self._view_frame.copy_text_to_clipboard('%f' % mean_hausdorff_distance)
def _handler_mask_join(self, event):
"""Computes the union of the masks selected in boxes A and B.
Saves the result as a new Mask
"""
if self.test_valid_mask_selection_any():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
if len(names_a) + len(names_b) < 2:
return
union_masksA = self.compute_mask_union(names_a)
union_masksB = self.compute_mask_union(names_b)
new_mask = None
if len(names_a) == 0:
new_mask = union_masksB
elif len(names_b) == 0:
new_mask = union_masksA
else:
new_mask = self._logical_unite_masks(union_masksA, union_masksB)
self.add_mask(new_mask)
def _handler_mask_subtract(self, event):
"""Subtracts the the union of the masks selected in box B from the union of the masks selected in box A.
Saves the result as a new Mask
"""
if self.test_valid_mask_selection_a_and_b():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
union_masksA = self.compute_mask_union(names_a)
union_masksB = self.compute_mask_union(names_b)
new_mask = self._logical_subtract_masks(union_masksA, union_masksB)
self.add_mask(new_mask)
def _handler_mask_intersect(self, event):
"""Intersects the the union of the masks selected in box A with the union of the masks selected in box B.
Saves the result as a new Mask
"""
if self.test_valid_mask_selection_a_and_b():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
union_masksA = self.compute_mask_union(names_a)
union_masksB = self.compute_mask_union(names_b)
new_mask = self._logical_intersect_masks(union_masksA, union_masksB)
self.add_mask(new_mask)
def _test_intersections(self, mask_name_list):
"""
Tests for intersections between the masks listed in mask_names
"""
mask_names = copy.copy(mask_name_list)
first_name = mask_names.pop()
data = self.masks[first_name].data
intersections_found = False
eight_bit = False
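        # Sum the masks voxel-wise: a maximum above 1 indicates overlap, while a maximum of 255 flags an 8-bit (non-binary) input mask.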
for mask_name in mask_names:
print 'adding %s' % mask_name
data2 = self.masks[mask_name].data
adder = vtk.vtkImageMathematics()
adder.SetOperationToAdd()
adder.SetInput1(data)
adder.SetInput2(data2)
adder.Update()
data = adder.GetOutput()
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(data)
accumulator.Update()
max = accumulator.GetMax()[0]
if max == 255:
eight_bit = True
elif max > 1:
intersections_found = True
else:
self._view_frame.dialog_info("No intersections found.\n(duplicate selections in A and B ignored).", "No intersections")
if eight_bit:
eight_bit_mask_names = ''
mask_names = copy.copy(mask_name_list)
for mask_name in mask_names:
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(self.masks[mask_name].data)
accumulator.Update()
if accumulator.GetMax()[0] == 255:
eight_bit_mask_names = '%s, "%s"' % (eight_bit_mask_names, mask_name)
eight_bit_mask_names = eight_bit_mask_names[2:] #Remove the first two characters for neat display purposes
self._view_frame.dialog_error("Masks should be binary. The following masks were found to be 8-bit:\n%s" % eight_bit_mask_names,"Non-binary mask found!")
elif intersections_found:
mask_name_pair_list = ''
mask_names = copy.copy(mask_name_list)
while len(mask_names) > 0:
name1 = mask_names.pop()
for name2 in mask_names:
adder = vtk.vtkImageMathematics()
adder.SetOperationToAdd()
adder.SetInput1(self.masks[name1].data)
adder.SetInput2(self.masks[name2].data)
adder.Update()
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(adder.GetOutput())
accumulator.Update()
if accumulator.GetMax()[0] == 2:
mask_name_pair_list = '%s,\n ("%s","%s")' % (mask_name_pair_list, name1, name2)
mask_name_pair_list = mask_name_pair_list[2:] #Remove the first two characters for neat display purposes
self._view_frame.dialog_exclaim("Intersections found between the following mask pairs:\n%s" % mask_name_pair_list,"Intersections found!")
def _test_dimensions(self, mask_names, msg):
"""
Tests whether the given masks have matching volumetric dimensions.
In practice mismatches can occur due to problems with feature generation algorithms (such as filtered backprojection)
"""
masks_by_dimensions = {}
masks_by_extent = {}
masks_by_whole_extent = {}
masks_by_spacing = {}
for mask_name in mask_names:
maskdata = self.masks[mask_name].data
dimensions = maskdata.GetDimensions()
spacing = maskdata.GetSpacing()
extent = maskdata.GetExtent()
whole_extent = maskdata.GetWholeExtent()
if not masks_by_dimensions.has_key(dimensions):
masks_by_dimensions[dimensions] = [str(mask_name)]
else:
masks_by_dimensions[dimensions].append(str(mask_name))
if not masks_by_spacing.has_key(spacing):
masks_by_spacing[spacing] = [str(mask_name)]
else:
masks_by_spacing[spacing].append(str(mask_name))
if not masks_by_extent.has_key(extent):
masks_by_extent[extent] = [str(mask_name)]
else:
masks_by_extent[extent].append(str(mask_name))
if not masks_by_whole_extent.has_key(whole_extent):
masks_by_whole_extent[whole_extent] = [str(mask_name)]
else:
masks_by_whole_extent[whole_extent].append(str(mask_name))
        if len(masks_by_dimensions.keys()) == 1 and len(masks_by_spacing.keys()) == 1 and len(masks_by_extent.keys()) == 1 and len(masks_by_whole_extent.keys()) == 1:
dimension_report = '%s masks have the same dimensions, spacing, extent and whole extent:\n\n' % msg
dimensions = masks_by_dimensions.keys().pop()
dimension_report = '%s dimensions = %s\n' % (dimension_report, str(dimensions))
dimensions = masks_by_spacing.keys().pop()
dimension_report = '%s spacing = %s\n' % (dimension_report, str(dimensions))
dimensions = masks_by_extent.keys().pop()
dimension_report = '%s extent = %s\n' % (dimension_report, str(dimensions))
dimensions = masks_by_whole_extent.keys().pop()
dimension_report = '%s whole extent = %s\n' % (dimension_report, str(dimensions))
self._view_frame.dialog_info(dimension_report, 'No mismatches')
else:
            dimension_report = '%s masks possess %d unique sets of dimensions. See below:\n' % (msg, len(masks_by_dimensions))
for k in masks_by_dimensions.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_dimensions[k]))
dimension_report = '%s\n\n%d unique spacings with their defining masks:\n' % (dimension_report, len(masks_by_spacing))
for k in masks_by_spacing.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_spacing[k]))
dimension_report = '%s\n\n%d unique extents with their defining masks:\n' % (dimension_report, len(masks_by_extent))
for k in masks_by_extent.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_extent[k]))
dimension_report = '%s\n\n%d unique whole_extents with their defining masks:\n' % (dimension_report, len(masks_by_whole_extent))
for k in masks_by_whole_extent.keys():
dimension_report = '%s\n%s => %s' % (dimension_report, str(k), str( masks_by_whole_extent[k]))
self._view_frame.dialog_exclaim(dimension_report,"Mismatches found!")
def _handler_test_all_dimensions(self, event):
"""
Tests whether any of the loaded masks have mismatching volume dimensions
"""
if len(self.masks) < 2:
self._view_frame.dialog_info("At least 2 masks need to be loaded to compare dimensions!","Fewer than two masks loaded")
return
mask_names = self.masks.keys()
self._test_dimensions(mask_names, 'All')
def _handler_test_selected_dimensions(self, event):
"""
        Tests whether the selected masks have mismatching volume dimensions
"""
if self.test_valid_mask_selection_multiple():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_names = names_a.copy()
for name in names_b:
mask_names.add(name)
self._test_dimensions(mask_names, 'Selected')
def _handler_test_all_intersections(self, event):
"""
Tests whether there is an intersection between any of the loaded masks
"""
if len(self.masks) < 2:
self._view_frame.dialog_info("At least 2 masks need to be loaded to detect intersections!","Fewer than two masks loaded")
return
mask_names = self.masks.keys()
self._test_intersections(mask_names)
def _handler_test_selected_intersections(self, event):
"""
Tests whether there is an intersection between the selected masks
"""
if self.test_valid_mask_selection_multiple():
names_a = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
mask_names = names_a.copy()
for name in names_b:
mask_names.add(name)
self._test_intersections(mask_names)
def compute_mask_union(self, mask_names_set):
'''Computes and returns the union of a set of masks, identified by a set of mask names.'''
mask_names = mask_names_set.copy() #To prevent changes to the passed set due to popping
united_mask = None
if len(mask_names) > 0:
mask_name = mask_names.pop()
united_mask = self.masks[mask_name]
for mask_name in mask_names:
united_mask = self._logical_unite_masks(united_mask, self.masks[mask_name])
return united_mask
def test_single_mask_selection(self):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA + selectionCountB == 0:
self._view_frame.dialog_info("No masks are selected in either column A or B.\nThis operation requires a single mask, either in A or B.","No masks selected - invalid operation")
return False
elif selectionCountA + selectionCountB > 1:
self._view_frame.dialog_info("Multiple masks are selected in columns A and/or B.\nThis operation requires a single mask, either in A or B (but not both).","Multiple masks selected - invalid operation")
return False
return True
def test_single_mask_pair_selection(self):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA == 0:
self._view_frame.dialog_info("No mask selected in column A.\nThis operation requires a single input each, for A and B.","Too few masks selected - invalid operation")
return False
if selectionCountB == 0:
self._view_frame.dialog_info("No mask selected in column B.\nThis operation requires a single input each, for A and B.","Too few masks selected - invalid operation")
return False
if selectionCountA > 1:
self._view_frame.dialog_info("Multiple masks are selected in column A.\nThis operation requires a single input each, for A and B.","Multiple maks selected - invalid operation")
return False
elif selectionCountB > 1:
self._view_frame.dialog_info("Multiple masks are selected in column B.\nThis operation requires a single input each, for A and B.","Multiple maks selected - invalid operation")
return False
return True
def test_valid_mask_selection_any(self, warn = True):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA == 0 and selectionCountB == 0:
if warn:
self._view_frame.dialog_info("No masks are selected.","No masks selected")
return False
return True
def test_valid_mask_selection_multiple(self, warn = True):
names = self._view_frame.get_selected_mask_names_a()
names_b = self._view_frame.get_selected_mask_names_b()
for name in names_b:
names.add(name)
if len(names) < 2:
if warn:
self._view_frame.dialog_info("Fewer than two unique masks selected.","Too few masks selected")
return False
return True
def test_valid_mask_selection_a_and_b(self, warn = True):
selectionCountA = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
selectionCountB = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selectionCountA == 0:
if warn:
self._view_frame.dialog_info("No mask is selected in column A.\nThis operation requires inputs A and B.","Mask A not defined")
return False
elif selectionCountB == 0:
if warn:
self._view_frame.dialog_info("No mask is selected in column B.\nThis operation requires inputs A and B.","Mask B not defined")
return False
return True
def test_valid_mask_selection_a(self, warn = True):
selection_count_a = self._view_frame.list_ctrl_maskA.GetSelectedItemCount()
if selection_count_a == 0:
if warn:
self._view_frame.dialog_info("This operation requires input from column A.","Mask A not defined")
return False
return True
def test_valid_mask_selection_b(self, warn = True):
selection_count_b = self._view_frame.list_ctrl_maskB.GetSelectedItemCount()
if selection_count_b == 0:
if warn:
self._view_frame.dialog_info("This operation requires input from column B.","Mask B not defined")
return False
return True
def _handler_close(self, event):
"Closes this program"
self.close()
def _handler_introspect(self, event):
self.miscObjectConfigure(self._view_frame, self, 'MaskComBinar')
def _handler_about(self, event):
self._view_frame.dialog_info("MaskComBinar:\nA tool for measuring and manipulating binary masks\n\nby Francois Malan","About MaskComBinar")
def render(self):
"""Method that calls Render() on the embedded RenderWindow.
Use this after having made changes to the scene.
"""
self._view_frame.render()
def _logical_unite_masks(self, maskA, maskB):
"""Returns logical addition of maskA and maskB => maskA OR maskB"""
if maskA == None:
return maskB
elif maskB == None:
return maskA
print 'Joining masks %s and %s' % (maskA.name, maskB.name)
logicOR = vtk.vtkImageLogic()
logicOR.SetOperationToOr()
logicOR.SetInput1(maskA.data)
logicOR.SetInput2(maskB.data)
logicOR.Update()
result = self._threshold_image(logicOR.GetOutput(), 1, 255)
return Mask('Merged','',result)
def _logical_intersect_masks(self, maskA, maskB):
if maskA == None or maskB == None:
return None
print 'Intersecting masks %s and %s' % (maskA.name, maskB.name)
logicAND = vtk.vtkImageLogic()
logicAND.SetOperationToAnd()
logicAND.SetInput1(maskA.data)
logicAND.SetInput2(maskB.data)
logicAND.Update()
result = self._threshold_image(logicAND.GetOutput(), 1, 255)
return Mask('Intersect','',result)
def _logical_subtract_masks(self, maskA, maskB):
"""Returns logical subtraction of maskB from maskA => maskA AND (NOT maskB)"""
if maskB == None:
return maskA
        print 'Subtracting mask %s from %s' % (maskB.name, maskA.name)
logicNOT = vtk.vtkImageLogic()
logicNOT.SetOperationToNot()
logicNOT.SetInput1(maskB.data)
logicNOT.Update()
logicAND = vtk.vtkImageLogic()
logicAND.SetOperationToAnd()
logicAND.SetInput1(maskA.data)
logicAND.SetInput2(logicNOT.GetOutput())
logicAND.Update()
result = self._threshold_image(logicAND.GetOutput(), 1, 255)
return Mask('Diff','',result)
def _threshold_image(self, image, lower, upper):
"""Thresholds a VTK Image, returning a signed short mask with 1 inside and 0 outside [lower, upper]"""
thresholder = vtk.vtkImageThreshold()
thresholder.SetInput(image)
thresholder.ThresholdBetween(lower, upper)
thresholder.SetInValue(1)
thresholder.SetOutValue(0)
thresholder.SetOutputScalarTypeToUnsignedChar()
thresholder.Update()
result = vtk.vtkImageData()
result.DeepCopy(thresholder.GetOutput())
return result
def _split_disconnected_objects(self, mask_name):
#This is done by labelling the objects from large to small
#Convert to ITK
mask = self.masks[mask_name]
thresholder = vtk.vtkImageThreshold()
thresholder.SetInput(mask.data)
thresholder.ThresholdBetween(1, 9999)
thresholder.SetInValue(1)
thresholder.SetOutValue(0)
thresholder.SetOutputScalarTypeToShort()
thresholder.Update()
v2i = itk.VTKImageToImageFilter[itk.Image.SS3].New()
v2i.SetInput(thresholder.GetOutput())
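        # Label connected components in ITK, then relabel them by decreasing size (label 1 = largest object).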
ccf = itk.ConnectedComponentImageFilter.ISS3ISS3.New()
ccf.SetInput(v2i.GetOutput())
relabeller = itk.RelabelComponentImageFilter.ISS3ISS3.New()
relabeller.SetInput(ccf.GetOutput())
#convert back to VTK
i2v = itk.ImageToVTKImageFilter[itk.Image.SS3].New()
i2v.SetInput(relabeller.GetOutput())
i2v.Update()
labeled = i2v.GetOutput()
accumulator = vtk.vtkImageAccumulate()
accumulator.SetInput(labeled)
accumulator.Update()
nr_of_components = accumulator.GetMax()[0]
print 'Found %d disconnected mask components' % nr_of_components
message = '%d disconnected components found.\nHow many do you want to accept (large to small)?' % nr_of_components
nr_to_process_str = self._view_frame.dialog_inputtext(message, 'Choose number of disconnected components', '1')[1]
try:
nr_to_process = int(nr_to_process_str)
        except ValueError:
self._view_frame.dialog_error('Invalid numeric input: %s' % nr_to_process_str, "Invalid input")
return
if (nr_to_process < 0) or (nr_to_process > nr_of_components):
self._view_frame.dialog_error('Number must be between 1 and %d' % nr_of_components, "Invalid input")
return
print 'Saving the largest %d components to new masks' % nr_to_process
thresholder = vtk.vtkImageThreshold()
thresholder.SetInput(labeled)
thresholder.SetInValue(1)
thresholder.SetOutValue(0)
thresholder.SetOutputScalarTypeToUnsignedChar()
for i in range(1, nr_to_process+1):
thresholder.ThresholdBetween(i, i)
thresholder.Update()
mask_data = vtk.vtkImageData()
mask_data.DeepCopy(thresholder.GetOutput())
new_mask = Mask('comp_%d' % i,'',mask_data)
self.add_mask(new_mask)
|
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import os
import pipes
import posixpath
import random
import re
import shlex
import sys
import devil_chromium
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.sdk import adb_wrapper
from devil.android.sdk import intent
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
with devil_env.SysPath(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'colorama', 'src')):
import colorama
from incremental_install import installer
from pylib import constants
from pylib.symbols import deobfuscator
def _Colorize(color, text):
# |color| as a string to avoid pylint's no-member warning :(.
# pylint: disable=no-member
return getattr(colorama.Fore, color) + text + colorama.Fore.RESET
def _InstallApk(devices, apk, install_dict):
def install(device):
if install_dict:
installer.Install(device, install_dict, apk=apk)
else:
device.Install(apk)
logging.info('Installing %sincremental apk.', '' if install_dict else 'non-')
device_utils.DeviceUtils.parallel(devices).pMap(install)
def _UninstallApk(devices, install_dict, package_name):
def uninstall(device):
if install_dict:
installer.Uninstall(device, package_name)
else:
device.Uninstall(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(uninstall)
def _LaunchUrl(devices, input_args, device_args_file, url, apk):
if input_args and device_args_file is None:
raise Exception('This apk does not support any flags.')
if url:
view_activity = apk.GetViewActivityName()
if not view_activity:
raise Exception('APK does not support launching with URLs.')
def launch(device):
# The flags are first updated with input args.
changer = flag_changer.FlagChanger(device, device_args_file)
flags = []
if input_args:
flags = shlex.split(input_args)
changer.ReplaceFlags(flags)
# Then launch the apk.
if url is None:
# Simulate app icon click if no url is present.
cmd = ['monkey', '-p', apk.GetPackageName(), '-c',
'android.intent.category.LAUNCHER', '1']
device.RunShellCommand(cmd, check_return=True)
else:
launch_intent = intent.Intent(action='android.intent.action.VIEW',
activity=view_activity, data=url,
package=apk.GetPackageName())
device.StartActivity(launch_intent)
device_utils.DeviceUtils.parallel(devices).pMap(launch)
def _ChangeFlags(devices, input_args, device_args_file):
if input_args is None:
_DisplayArgs(devices, device_args_file)
else:
flags = shlex.split(input_args)
def update(device):
flag_changer.FlagChanger(device, device_args_file).ReplaceFlags(flags)
device_utils.DeviceUtils.parallel(devices).pMap(update)
def _TargetCpuToTargetArch(target_cpu):
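  # Maps a target_cpu value (e.g. 'x64', 'mipsel') to the --target-arch value
  # expected by adb_gdb.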
if target_cpu == 'x64':
return 'x86_64'
if target_cpu == 'mipsel':
return 'mips'
return target_cpu
def _RunGdb(device, package_name, output_directory, target_cpu, extra_args,
verbose):
gdb_script_path = os.path.dirname(__file__) + '/adb_gdb'
cmd = [
gdb_script_path,
'--package-name=%s' % package_name,
'--output-directory=%s' % output_directory,
'--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
'--device=%s' % device.serial,
      # Use one lib dir per device so that changing between devices does not
      # require refetching the device libs.
'--pull-libs-dir=/tmp/adb-gdb-libs-%s' % device.serial,
]
# Enable verbose output of adb_gdb if it's set for this script.
if verbose:
cmd.append('--verbose')
if target_cpu:
cmd.append('--target-arch=%s' % _TargetCpuToTargetArch(target_cpu))
cmd.extend(extra_args)
logging.warning('Running: %s', ' '.join(pipes.quote(x) for x in cmd))
print _Colorize('YELLOW', 'All subsequent output is from adb_gdb script.')
os.execv(gdb_script_path, cmd)
def _PrintPerDeviceOutput(devices, results, single_line=False):
for d, result in zip(devices, results):
if not single_line and d is not devices[0]:
sys.stdout.write('\n')
sys.stdout.write(
_Colorize('YELLOW', '%s (%s):' % (d, d.build_description)))
sys.stdout.write(' ' if single_line else '\n')
yield result
def _RunMemUsage(devices, package_name):
def mem_usage_helper(d):
ret = []
proc_map = d.GetPids(package_name)
for name, pids in proc_map.iteritems():
for pid in pids:
ret.append((name, pid, d.GetMemoryUsageForPid(pid)))
return ret
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(mem_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print 'No processes found.'
else:
for name, pid, usage in sorted(result):
print '%s(%s):' % (name, pid)
for k, v in sorted(usage.iteritems()):
print ' %s=%d' % (k, v)
print
def _DuHelper(device, path_spec, run_as=None):
"""Runs "du -s -k |path_spec|" on |device| and returns parsed result.
Args:
device: A DeviceUtils instance.
path_spec: The list of paths to run du on. May contain shell expansions
(will not be escaped).
run_as: Package name to run as, or None to run as shell user. If not None
and app is not android:debuggable (run-as fails), then command will be
run as root.
Returns:
A dict of path->size in kb containing all paths in |path_spec| that exist on
device. Paths that do not exist are silently ignored.
"""
# Example output for: du -s -k /data/data/org.chromium.chrome/{*,.*}
# 144 /data/data/org.chromium.chrome/cache
# 8 /data/data/org.chromium.chrome/files
# <snip>
# du: .*: No such file or directory
# The -d flag works differently across Android versions, so use -s instead.
cmd_str = 'du -s -k ' + path_spec
lines = device.RunShellCommand(cmd_str, run_as=run_as, shell=True,
check_return=False)
output = '\n'.join(lines)
# run-as: Package 'com.android.chrome' is not debuggable
if output.startswith('run-as:'):
# check_return=False needed for when some paths in path_spec do not exist.
lines = device.RunShellCommand(cmd_str, as_root=True, shell=True,
check_return=False)
ret = {}
try:
for line in lines:
# du: .*: No such file or directory
if line.startswith('du:'):
continue
size, subpath = line.split(None, 1)
ret[subpath] = int(size)
return ret
except ValueError:
logging.error('Failed to parse du output:\n%s', output)
raise
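# A minimal sketch of what _DuHelper returns for the example in its docstring
# (sizes are in kb; the exact paths and numbers depend on the device):
#   _DuHelper(device, '/data/data/org.chromium.chrome/{*,.*}')
#   => {'/data/data/org.chromium.chrome/cache': 144,
#       '/data/data/org.chromium.chrome/files': 8}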
def _RunDiskUsage(devices, package_name, verbose):
# Measuring dex size is a bit complicated:
# https://source.android.com/devices/tech/dalvik/jit-compiler
#
# For KitKat and below:
# dumpsys package contains:
# dataDir=/data/data/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-1.apk
# resourcePath=/data/app/org.chromium.chrome-1.apk
# nativeLibraryPath=/data/app-lib/org.chromium.chrome-1
# To measure odex:
# ls -l /data/dalvik-cache/data@app@org.chromium.chrome-1.apk@classes.dex
#
# For Android L and M (and maybe for N+ system apps):
# dumpsys package contains:
# codePath=/data/app/org.chromium.chrome-1
# resourcePath=/data/app/org.chromium.chrome-1
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-1/lib
# To measure odex:
# # Option 1:
# /data/dalvik-cache/arm/data@app@org.chromium.chrome-1@base.apk@classes.dex
# /data/dalvik-cache/arm/data@app@org.chromium.chrome-1@base.apk@classes.vdex
# ls -l /data/dalvik-cache/profiles/org.chromium.chrome
# (these profiles all appear to be 0 bytes)
# # Option 2:
# ls -l /data/app/org.chromium.chrome-1/oat/arm/base.odex
#
# For Android N+:
# dumpsys package contains:
# dataDir=/data/user/0/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# resourcePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-GUID/lib
# Instruction Set: arm
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm/base.odex[status=kOatUpToDate, compilation_f
# ilter=quicken]
# Instruction Set: arm64
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm64/base.odex[status=..., compilation_filter=q
# uicken]
# To measure odex:
# ls -l /data/app/.../oat/arm/base.odex
# ls -l /data/app/.../oat/arm/base.vdex (optional)
# To measure the correct odex size:
# cmd package compile -m speed org.chromium.chrome # For webview
# cmd package compile -m speed-profile org.chromium.chrome # For others
def disk_usage_helper(d):
package_output = '\n'.join(d.RunShellCommand(
['dumpsys', 'package', package_name], check_return=True))
# Prints a message but does not return an error when the apk is not installed.
if 'Unable to find package:' in package_output:
return None
# Ignore system apks.
idx = package_output.find('Hidden system packages:')
if idx != -1:
package_output = package_output[:idx]
try:
data_dir = re.search(r'dataDir=(.*)', package_output).group(1)
code_path = re.search(r'codePath=(.*)', package_output).group(1)
lib_path = re.search(r'(?:legacyN|n)ativeLibrary(?:Dir|Path)=(.*)',
package_output).group(1)
except AttributeError:
raise Exception('Error parsing dumpsys output: ' + package_output)
compilation_filters = set()
# Match "compilation_filter=value", where a line break can occur at any spot
# (refer to examples above).
awful_wrapping = r'\s*'.join('compilation_filter=')
for m in re.finditer(awful_wrapping + r'([\s\S]+?)[\],]', package_output):
compilation_filters.add(re.sub(r'\s+', '', m.group(1)))
compilation_filter = ','.join(sorted(compilation_filters))
data_dir_sizes = _DuHelper(d, '%s/{*,.*}' % data_dir, run_as=package_name)
# Measure code_cache separately since it can be large.
code_cache_sizes = {}
code_cache_dir = next(
(k for k in data_dir_sizes if k.endswith('/code_cache')), None)
if code_cache_dir:
data_dir_sizes.pop(code_cache_dir)
code_cache_sizes = _DuHelper(d, '%s/{*,.*}' % code_cache_dir,
run_as=package_name)
apk_path_spec = code_path
if not apk_path_spec.endswith('.apk'):
apk_path_spec += '/*.apk'
apk_sizes = _DuHelper(d, apk_path_spec)
if lib_path.endswith('/lib'):
# Shows architecture subdirectory.
lib_sizes = _DuHelper(d, '%s/{*,.*}' % lib_path)
else:
lib_sizes = _DuHelper(d, lib_path)
# Look at all possible locations for odex files.
odex_paths = []
for apk_path in apk_sizes:
mangled_apk_path = apk_path[1:].replace('/', '@')
apk_basename = posixpath.basename(apk_path)[:-4]
for ext in ('dex', 'odex', 'vdex', 'art'):
# Easier to check all architectures than to determine active ones.
for arch in ('arm', 'arm64', 'x86', 'x86_64', 'mips', 'mips64'):
odex_paths.append(
'%s/oat/%s/%s.%s' % (code_path, arch, apk_basename, ext))
# No app could possibly have more than 6 dex files.
for suffix in ('', '2', '3', '4', '5'):
odex_paths.append('/data/dalvik-cache/%s/%s@classes%s.%s' % (
arch, mangled_apk_path, suffix, ext))
# This path does not have |arch|, so don't repeat it for every arch.
if arch == 'arm':
odex_paths.append('/data/dalvik-cache/%s@classes%s.dex' % (
mangled_apk_path, suffix))
odex_sizes = _DuHelper(d, ' '.join(pipes.quote(p) for p in odex_paths))
return (data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter)
def print_sizes(desc, sizes):
print '%s: %dkb' % (desc, sum(sizes.itervalues()))
if verbose:
for path, size in sorted(sizes.iteritems()):
print ' %s: %skb' % (path, size)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(disk_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print 'APK is not installed.'
continue
(data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter) = result
total = sum(sum(sizes.itervalues()) for sizes in result[:-1])
print_sizes('Apk', apk_sizes)
print_sizes('App Data (non-code cache)', data_dir_sizes)
print_sizes('App Data (code cache)', code_cache_sizes)
print_sizes('Native Libs', lib_sizes)
show_warning = compilation_filter and 'speed' not in compilation_filter
compilation_filter = compilation_filter or 'n/a'
print_sizes('odex (compilation_filter=%s)' % compilation_filter, odex_sizes)
if show_warning:
logging.warning('For a more realistic odex size, run:')
logging.warning(' %s compile-dex [speed|speed-profile]', sys.argv[0])
print 'Total: %skb (%.1fmb)' % (total, total / 1024.0)
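# Illustrative example of the odex path construction above (the install path is
# hypothetical): for an apk at /data/app/org.chromium.chrome-1/base.apk,
# mangled_apk_path becomes 'data@app@org.chromium.chrome-1@base.apk', yielding
# candidates such as:
#   /data/app/org.chromium.chrome-1/oat/arm/base.odex
#   /data/dalvik-cache/arm/data@app@org.chromium.chrome-1@base.apk@classes.dex
# Only the candidates that actually exist contribute to the reported size.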
def _RunLogcat(device, package_name, verbose, mapping_path):
if mapping_path:
try:
deobfuscate = deobfuscator.Deobfuscator(mapping_path)
except OSError:
sys.stderr.write('Error executing "bin/java_deobfuscate". '
'Did you forget to build it?\n')
sys.exit(1)
def get_my_pids():
my_pids = []
for pids in device.GetPids(package_name).values():
my_pids.extend(pids)
return [int(pid) for pid in my_pids]
def process_line(line, fast=False):
if verbose:
if fast:
return
else:
if not line or line.startswith('------'):
return
tokens = line.split(None, 4)
pid = int(tokens[2])
priority = tokens[4]
if pid in my_pids or (not fast and priority == 'F'):
pass # write
elif pid in not_my_pids:
return
elif fast:
# Skip checking whether our package spawned new processes.
not_my_pids.add(pid)
return
else:
# Check and add the pid if it is a new one from our package.
my_pids.update(get_my_pids())
if pid not in my_pids:
not_my_pids.add(pid)
return
if mapping_path:
line = '\n'.join(deobfuscate.TransformLines([line.rstrip()])) + '\n'
sys.stdout.write(line)
try:
my_pids = set(get_my_pids())
not_my_pids = set()
nonce = 'apk_wrappers.py nonce={}'.format(random.random())
device.RunShellCommand(['log', nonce])
fast = True
for line in device.adb.Logcat(logcat_format='threadtime'):
try:
process_line(line, fast)
except:
sys.stderr.write('Failed to process line: ' + line)
raise
if fast and nonce in line:
fast = False
except KeyboardInterrupt:
pass # Don't show stack trace upon Ctrl-C
finally:
if mapping_path:
deobfuscate.Close()
def _RunPs(devices, package_name):
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_pids = parallel_devices.GetPids(package_name).pGet(None)
for proc_map in _PrintPerDeviceOutput(devices, all_pids):
if not proc_map:
print 'No processes found.'
else:
for name, pids in sorted(proc_map.items()):
print name, ','.join(pids)
def _RunShell(devices, package_name, cmd):
if cmd:
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(
cmd, run_as=package_name).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print line
else:
adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
cmd = [adb_path, '-s', devices[0].serial, 'shell']
# Pre-N devices do not support -t flag.
if devices[0].build_version_sdk >= version_codes.NOUGAT:
cmd += ['-t', 'run-as', package_name]
else:
print 'Upon entering the shell, run:'
print 'run-as', package_name
print
os.execv(adb_path, cmd)
def _RunCompileDex(devices, package_name, compilation_filter):
cmd = ['cmd', 'package', 'compile', '-f', '-m', compilation_filter,
package_name]
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(cmd).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print line
def _GenerateAvailableDevicesMessage(devices):
devices_obj = device_utils.DeviceUtils.parallel(devices)
descriptions = devices_obj.pMap(lambda d: d.build_description).pGet(None)
msg = 'Available devices:\n'
for d, desc in zip(devices, descriptions):
msg += ' %s (%s)\n' % (d, desc)
return msg
# TODO(agrieve): Add "--all" to the MultipleDevicesError message and use it here.
def _GenerateMissingAllFlagMessage(devices):
return ('More than one device available. Use --all to select all devices, ' +
'or use --device to select a device by serial.\n\n' +
_GenerateAvailableDevicesMessage(devices))
def _DisplayArgs(devices, device_args_file):
def flags_helper(d):
changer = flag_changer.FlagChanger(d, device_args_file)
return changer.GetCurrentFlags()
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.pMap(flags_helper).pGet(None)
print 'Existing flags per-device (via /data/local/tmp/%s):' % device_args_file
for flags in _PrintPerDeviceOutput(devices, outputs, single_line=True):
quoted_flags = ' '.join(pipes.quote(f) for f in flags)
print quoted_flags or 'No flags set.'
def _DeviceCachePath(device, output_directory):
file_name = 'device_cache_%s.json' % device.serial
return os.path.join(output_directory, file_name)
def _LoadDeviceCaches(devices, output_directory):
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
if os.path.exists(cache_path):
logging.debug('Using device cache: %s', cache_path)
with open(cache_path) as f:
d.LoadCacheData(f.read())
# Delete the cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
else:
logging.debug('No cache present for device: %s', d)
def _SaveDeviceCaches(devices, output_directory):
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
with open(cache_path, 'w') as f:
f.write(d.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
class _Command(object):
name = None
description = None
needs_package_name = False
needs_output_directory = False
needs_apk_path = False
supports_incremental = False
accepts_command_line_flags = False
accepts_args = False
accepts_url = False
all_devices_by_default = False
calls_exec = False
def __init__(self, from_wrapper_script):
self._parser = None
self._from_wrapper_script = from_wrapper_script
self.args = None
self.apk_helper = None
self.install_dict = None
self.devices = None
# Do not support incremental install outside the context of wrapper scripts.
if not from_wrapper_script:
self.supports_incremental = False
def _RegisterExtraArgs(self, subp):
pass
def RegisterArgs(self, parser):
subp = parser.add_parser(self.name, help=self.description)
self._parser = subp
subp.set_defaults(command=self)
subp.add_argument('--all',
action='store_true',
default=self.all_devices_by_default,
help='Operate on all connected devices.',)
subp.add_argument('-d',
'--device',
action='append',
default=[],
dest='devices',
help='Target device for script to work on. Enter '
'multiple times for multiple devices.')
subp.add_argument('-v',
'--verbose',
action='count',
default=0,
dest='verbose_count',
help='Verbose level (multiple times for more)')
group = subp.add_argument_group('%s arguments' % self.name)
if self.needs_package_name:
# Always gleaned from apk when using wrapper scripts.
group.add_argument('--package-name',
help=argparse.SUPPRESS if self._from_wrapper_script else (
"App's package name."))
if self.needs_apk_path or self.needs_package_name:
# Adding this argument to the subparser would override the set_defaults()
# value set on the parent parser (even if None).
if not self._from_wrapper_script:
group.add_argument('--apk-path',
required=self.needs_apk_path,
help='Path to .apk')
if self.supports_incremental:
group.add_argument('--incremental',
action='store_true',
default=False,
help='Always install an incremental apk.')
group.add_argument('--non-incremental',
action='store_true',
default=False,
help='Always install a non-incremental apk.')
# accepts_command_line_flags and accepts_args are mutually exclusive.
# argparse will throw if they are both set.
if self.accepts_command_line_flags:
group.add_argument('--args', help='Command-line flags.')
if self.accepts_args:
group.add_argument('--args', help='Extra arguments.')
if self.accepts_url:
group.add_argument('url', nargs='?', help='A URL to launch with.')
if not self._from_wrapper_script and self.accepts_command_line_flags:
# Provided by wrapper scripts.
group.add_argument(
'--command-line-flags-file-name',
help='Name of the command-line flags file')
self._RegisterExtraArgs(group)
def ProcessArgs(self, args):
devices = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.devices,
enable_device_files_cache=bool(args.output_directory),
default_retries=0)
self.args = args
self.devices = devices
# TODO(agrieve): Device cache should not depend on output directory.
# Maybe put it in /tmp?
_LoadDeviceCaches(devices, args.output_directory)
# Ensure these keys always exist. They are set by wrapper scripts, but not
# always added when not using wrapper scripts.
args.__dict__.setdefault('apk_path', None)
args.__dict__.setdefault('incremental_json', None)
try:
if len(devices) > 1:
if self.calls_exec:
self._parser.error(device_errors.MultipleDevicesError(devices))
if not args.all and not args.devices:
self._parser.error(_GenerateMissingAllFlagMessage(devices))
if self.supports_incremental:
if args.incremental and args.non_incremental:
self._parser.error('Must use only one of --incremental and '
'--non-incremental')
elif args.non_incremental:
if not args.apk_path:
self._parser.error('Apk has not been built.')
args.incremental_json = None
elif args.incremental:
if not args.incremental_json:
self._parser.error('Incremental apk has not been built.')
args.apk_path = None
if args.apk_path and args.incremental_json:
self._parser.error('Both incremental and non-incremental apks exist. '
'Select using --incremental or --non-incremental')
if self.needs_apk_path or args.apk_path or args.incremental_json:
if args.incremental_json:
with open(args.incremental_json) as f:
install_dict = json.load(f)
apk_path = os.path.join(args.output_directory,
install_dict['apk_path'])
if os.path.exists(apk_path):
self.install_dict = install_dict
self.apk_helper = apk_helper.ToHelper(
os.path.join(args.output_directory,
self.install_dict['apk_path']))
if not self.apk_helper and args.apk_path:
self.apk_helper = apk_helper.ToHelper(args.apk_path)
if not self.apk_helper:
self._parser.error(
'Neither incremental nor non-incremental apk is built.')
if self.needs_package_name and not args.package_name:
if self.apk_helper:
args.package_name = self.apk_helper.GetPackageName()
elif self._from_wrapper_script:
self._parser.error(
'Neither incremental nor non-incremental apk is built.')
else:
self._parser.error('One of --package-name or --apk-path is required.')
# Save cache now if command will not get a chance to afterwards.
if self.calls_exec:
_SaveDeviceCaches(devices, args.output_directory)
except:
_SaveDeviceCaches(devices, args.output_directory)
raise
class _DevicesCommand(_Command):
name = 'devices'
description = 'Describe attached devices.'
all_devices_by_default = True
def Run(self):
print _GenerateAvailableDevicesMessage(self.devices)
class _InstallCommand(_Command):
name = 'install'
description = 'Installs the APK to one or more devices.'
needs_apk_path = True
supports_incremental = True
def Run(self):
_InstallApk(self.devices, self.apk_helper, self.install_dict)
class _UninstallCommand(_Command):
name = 'uninstall'
description = 'Removes the APK from one or more devices.'
needs_package_name = True
def Run(self):
_UninstallApk(self.devices, self.install_dict, self.args.package_name)
class _LaunchCommand(_Command):
name = 'launch'
description = ('Sends a launch intent for the APK after first writing the '
'command-line flags file.')
# TODO(agrieve): Launch could be changed to require only package name by
# parsing "dumpsys package" for launch & view activities.
needs_apk_path = True
accepts_command_line_flags = True
accepts_url = True
all_devices_by_default = True
def Run(self):
_LaunchUrl(self.devices, self.args.args, self.args.command_line_flags_file,
self.args.url, self.apk_helper)
class _RunCommand(_Command):
name = 'run'
description = 'Install and then launch.'
needs_apk_path = True
supports_incremental = True
needs_package_name = True
accepts_command_line_flags = True
accepts_url = True
def Run(self):
logging.warning('Installing...')
_InstallApk(self.devices, self.apk_helper, self.install_dict)
logging.warning('Sending launch intent...')
_LaunchUrl(self.devices, self.args.args, self.args.command_line_flags_file,
self.args.url, self.apk_helper)
class _StopCommand(_Command):
name = 'stop'
description = 'Force-stops the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ForceStop(
self.args.package_name)
class _ClearDataCommand(_Command):
name = 'clear-data'
description = 'Clears all app data.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ClearApplicationState(
self.args.package_name)
class _ArgvCommand(_Command):
name = 'argv'
description = 'Display and optionally update command-line flags file.'
needs_package_name = True
accepts_command_line_flags = True
all_devices_by_default = True
def Run(self):
_ChangeFlags(self.devices, self.args.args,
self.args.command_line_flags_file)
class _GdbCommand(_Command):
name = 'gdb'
description = 'Runs //build/android/adb_gdb with apk-specific args.'
needs_package_name = True
needs_output_directory = True
accepts_args = True
calls_exec = True
def Run(self):
extra_args = shlex.split(self.args.args or '')
_RunGdb(self.devices[0], self.args.package_name, self.args.output_directory,
self.args.target_cpu, extra_args, bool(self.args.verbose_count))
class _LogcatCommand(_Command):
name = 'logcat'
description = 'Runs "adb logcat" filtering to just the current APK processes'
needs_package_name = True
calls_exec = True
def Run(self):
mapping = self.args.proguard_mapping_path
if self.args.no_deobfuscate:
mapping = None
_RunLogcat(self.devices[0], self.args.package_name,
bool(self.args.verbose_count), mapping)
def _RegisterExtraArgs(self, group):
if self._from_wrapper_script:
group.add_argument('--no-deobfuscate', action='store_true',
help='Disables ProGuard deobfuscation of logcat.')
else:
group.set_defaults(no_deobfuscate=False)
group.add_argument('--proguard-mapping-path',
help='Path to ProGuard map (enables deobfuscation)')
class _PsCommand(_Command):
name = 'ps'
description = 'Show PIDs of any APK processes currently running.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunPs(self.devices, self.args.package_name)
class _DiskUsageCommand(_Command):
name = 'disk-usage'
description = 'Show how much device storage is being consumed by the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunDiskUsage(self.devices, self.args.package_name,
bool(self.args.verbose_count))
class _MemUsageCommand(_Command):
name = 'mem-usage'
description = 'Show memory usage of currently running APK processes.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunMemUsage(self.devices, self.args.package_name)
class _ShellCommand(_Command):
name = 'shell'
description = ('Same as "adb shell <command>", but runs as the apk\'s uid '
'(via run-as). Useful for inspecting the app\'s data '
'directory.')
needs_package_name = True
@property
def calls_exec(self):
return not self.args.cmd
def _RegisterExtraArgs(self, group):
group.add_argument(
'cmd', nargs=argparse.REMAINDER, help='Command to run.')
def Run(self):
_RunShell(self.devices, self.args.package_name, self.args.cmd)
class _CompileDexCommand(_Command):
name = 'compile-dex'
description = ('Applicable only for Android N+. Forces .odex files to be '
'compiled with the given compilation filter. To see existing '
'filter, use "disk-usage" command.')
needs_package_name = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'compilation_filter',
choices=['verify', 'quicken', 'space-profile', 'space',
'speed-profile', 'speed'],
help='For WebView/Monochrome, use "speed". For other apks, use '
'"speed-profile".')
def Run(self):
_RunCompileDex(self.devices, self.args.package_name,
self.args.compilation_filter)
_COMMANDS = [
_DevicesCommand,
_InstallCommand,
_UninstallCommand,
_LaunchCommand,
_RunCommand,
_StopCommand,
_ClearDataCommand,
_ArgvCommand,
_GdbCommand,
_LogcatCommand,
_PsCommand,
_DiskUsageCommand,
_MemUsageCommand,
_ShellCommand,
_CompileDexCommand,
]
def _ParseArgs(parser, from_wrapper_script):
subparsers = parser.add_subparsers()
commands = [clazz(from_wrapper_script) for clazz in _COMMANDS]
for command in commands:
if from_wrapper_script or not command.needs_output_directory:
command.RegisterArgs(subparsers)
# Show extended help when no command is passed.
argv = sys.argv[1:]
if not argv:
argv = ['--help']
return parser.parse_args(argv)
def _RunInternal(parser, output_directory=None):
colorama.init()
parser.set_defaults(output_directory=output_directory)
from_wrapper_script = bool(output_directory)
args = _ParseArgs(parser, from_wrapper_script)
run_tests_helper.SetLogLevel(args.verbose_count)
args.command.ProcessArgs(args)
args.command.Run()
# Incremental install depends on the cache being cleared when uninstalling.
if args.command.name != 'uninstall':
_SaveDeviceCaches(args.command.devices, output_directory)
# TODO(agrieve): Remove =None from target_cpu on or after October 2017.
# It exists only so that stale wrapper scripts continue to work.
def Run(output_directory, apk_path, incremental_json, command_line_flags_file,
target_cpu, proguard_mapping_path):
"""Entry point for generated wrapper scripts."""
constants.SetOutputDirectory(output_directory)
devil_chromium.Initialize(output_directory=output_directory)
parser = argparse.ArgumentParser()
exists_or_none = lambda p: p if p and os.path.exists(p) else None
parser.set_defaults(
command_line_flags_file=command_line_flags_file,
target_cpu=target_cpu,
apk_path=exists_or_none(apk_path),
incremental_json=exists_or_none(incremental_json),
proguard_mapping_path=proguard_mapping_path)
_RunInternal(parser, output_directory=output_directory)
def main():
devil_chromium.Initialize()
_RunInternal(argparse.ArgumentParser(), output_directory=None)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from classytags.arguments import Argument, MultiValueArgument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag, AsTag
from classytags.parser import Parser
from cms.models import Page, Placeholder as PlaceholderModel
from cms.plugin_rendering import render_placeholder
from cms.plugins.utils import get_plugins, assign_plugins
from cms.utils import get_language_from_request, get_cms_setting
from cms.utils.page_resolver import get_page_queryset
from cms.utils.placeholder import validate_placeholder_name
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
from itertools import chain
import re
register = template.Library()
def get_site_id(site):
if site:
if isinstance(site, Site):
site_id = site.id
elif isinstance(site, int) or (isinstance(site, basestring) and site.isdigit()):
site_id = int(site)
else:
site_id = settings.SITE_ID
else:
site_id = settings.SITE_ID
return site_id
def has_permission(page, request):
return page.has_change_permission(request)
register.filter(has_permission)
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
if isinstance(page_lookup, Page):
page_key = str(page_lookup.pk)
else:
page_key = str(page_lookup)
page_key = _clean_key(page_key)
return name + '__page_lookup:' + page_key + '_site:' + str(site_id) + '_lang:' + str(lang)
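# A minimal sketch of the cache keys produced above (the lookup value, site id
# and language are hypothetical):
#   _get_cache_key('page_url', 'my-page', 'en', 1)
#   => 'page_url__page_lookup:my-page_site:1_lang:en'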
def _get_page_by_untyped_arg(page_lookup, request, site_id):
"""
The `page_lookup` argument can be of any of the following types:
- Integer: interpreted as `pk` of the desired page
- String: interpreted as `reverse_id` of the desired page
- `dict`: a dictionary containing keyword arguments to find the desired page
(for instance: `{'pk': 1}`)
- `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
- `None`: the current page will be used
"""
if page_lookup is None:
return request.current_page
if isinstance(page_lookup, Page):
return page_lookup
if isinstance(page_lookup, basestring):
page_lookup = {'reverse_id': page_lookup}
elif isinstance(page_lookup, (int, long)):
page_lookup = {'pk': page_lookup}
elif not isinstance(page_lookup, dict):
raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
page_lookup.update({'site': site_id})
try:
return get_page_queryset(request).get(**page_lookup)
except Page.DoesNotExist:
site = Site.objects.get_current()
subject = _('Page not found on %(domain)s') % {'domain': site.domain}
body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
"The URL of the request was: http://%(host)s%(path)s")\
% {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
if settings.DEBUG:
raise Page.DoesNotExist(body)
else:
if settings.SEND_BROKEN_LINK_EMAILS:
mail_managers(subject, body, fail_silently=True)
return None
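# Illustrative usage of the helper above; the reverse_id and pk values are
# hypothetical examples of the accepted argument types:
#   _get_page_by_untyped_arg('contact-page', request, site_id)  # by reverse_id
#   _get_page_by_untyped_arg(42, request, site_id)              # by pk
#   _get_page_by_untyped_arg({'pk': 42}, request, site_id)      # by keyword args
#   _get_page_by_untyped_arg(None, request, site_id)            # current page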
class PageUrl(InclusionTag):
template = 'cms/content.html'
name = 'page_url'
options = Options(
Argument('page_lookup'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, context, page_lookup, lang, site):
site_id = get_site_id(site)
request = context.get('request', False)
if not request:
return {'content': ''}
if request.current_page == "dummy":
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
cache_key = _get_cache_key('page_url', page_lookup, lang, site_id) + '_type:absolute_url'
url = cache.get(cache_key)
if not url:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if page:
url = page.get_absolute_url(language=lang)
cache.set(cache_key, url, get_cms_setting('CACHE_DURATIONS')['content'])
if url:
return {'content': url}
return {'content': ''}
register.tag(PageUrl)
register.tag('page_id_url', PageUrl)
def _get_placeholder(current_page, page, context, name):
from cms.utils.plugins import get_placeholders
placeholder_cache = getattr(current_page, '_tmp_placeholders_cache', {})
if page.pk in placeholder_cache:
return placeholder_cache[page.pk].get(name, None)
placeholder_cache[page.pk] = {}
slots = get_placeholders(page.get_template())
placeholders = page.placeholders.filter(slot__in=slots)
assign_plugins(context['request'], placeholders, get_language())
for placeholder in placeholders:
placeholder_cache[page.pk][placeholder.slot] = placeholder
placeholder.page = page
current_page._tmp_placeholders_cache = placeholder_cache
return placeholder_cache[page.pk].get(name, None)
def get_placeholder_content(context, request, current_page, name, inherit):
edit_mode = getattr(request, 'toolbar', None) and getattr(request.toolbar, 'edit_mode')
pages = [current_page]
# don't display inherited plugins in edit mode, so that the user doesn't
# mistakenly edit/delete them. This is a fix for issue #1303. See the discussion
# there for possible enhancements
if inherit and not edit_mode:
pages = chain([current_page], current_page.get_cached_ancestors(ascending=True))
for page in pages:
placeholder = _get_placeholder(current_page, page, context, name)
if placeholder is None:
continue
if not get_plugins(request, placeholder):
continue
content = render_placeholder(placeholder, context, name)
if content:
return content
# if we reach this point, we have an empty or non-existent placeholder
# call _get_placeholder again to get the placeholder properly rendered
# in frontend editing
placeholder = _get_placeholder(current_page, current_page, context, name)
return render_placeholder(placeholder, context, name)
class PlaceholderParser(Parser):
def parse_blocks(self):
for bit in getattr(self.kwargs['extra_bits'], 'value', self.kwargs['extra_bits']):
if getattr(bit, 'value', bit.var.value) == 'or':
return super(PlaceholderParser, self).parse_blocks()
return
class PlaceholderOptions(Options):
def get_parser_class(self):
return PlaceholderParser
class Placeholder(Tag):
"""
This template node is used to output page content and
is also used in the admin to dynamically generate input fields.
eg: {% placeholder "placeholder_name" %}
{% placeholder "sidebar" inherit %}
{% placeholder "footer" inherit or %}
<a href="/about/">About us</a>
{% endplaceholder %}
Keyword arguments:
name -- the name of the placeholder
width -- additional width attribute (integer) which gets added to the plugin context
(deprecated, use `{% with 320 as width %}{% placeholder "foo" %}{% endwith %}`)
inherit -- optional argument which if given will result in inheriting
the content of the placeholder with the same name on parent pages
or -- optional argument which if given will make the template tag a block
tag whose content is shown if the placeholder is empty
"""
name = 'placeholder'
options = PlaceholderOptions(
Argument('name', resolve=False),
MultiValueArgument('extra_bits', required=False, resolve=False),
blocks=[
('endplaceholder', 'nodelist'),
]
)
def render_tag(self, context, name, extra_bits, nodelist=None):
validate_placeholder_name(name)
width = None
inherit = False
for bit in extra_bits:
if bit == 'inherit':
inherit = True
elif bit.isdigit():
width = int(bit)
import warnings
warnings.warn(
"The width parameter for the placeholder tag is deprecated.",
DeprecationWarning
)
if not 'request' in context:
return ''
request = context['request']
if width:
context.update({'width': width})
page = request.current_page
if not page or page == 'dummy':
if nodelist:
return nodelist.render(context)
return ''
content = get_placeholder_content(context, request, page, name, inherit)
if not content and nodelist:
return nodelist.render(context)
return content
def get_name(self):
return self.kwargs['name'].var.value.strip('"').strip("'")
register.tag(Placeholder)
class RenderPlugin(InclusionTag):
template = 'cms/content.html'
name = 'render_plugin'
options = Options(
Argument('plugin')
)
def get_context(self, context, plugin):
# Prepend front-end editing toolbar output if applicable
edit = False
request = context['request']
toolbar = getattr(request, 'toolbar', None)
page = request.current_page
if toolbar and toolbar.edit_mode and (not page or page.has_change_permission(request)):
edit = True
if edit:
from cms.middleware.toolbar import toolbar_plugin_processor
processors = (toolbar_plugin_processor,)
else:
processors = None
return {'content': plugin.render_plugin(context, processors=processors)}
register.tag(RenderPlugin)
class PageAttribute(AsTag):
"""
This template node is used to output attribute from a page such
as its title or slug.
Synopsis
{% page_attribute "field-name" %}
{% page_attribute "field-name" as varname %}
{% page_attribute "field-name" page_lookup %}
{% page_attribute "field-name" page_lookup as varname %}
Example
{# Output current page's page_title attribute: #}
{% page_attribute "page_title" %}
{# Output page_title attribute of the page with reverse_id "the_page": #}
{% page_attribute "page_title" "the_page" %}
{# Output slug attribute of the page with pk 10: #}
{% page_attribute "slug" 10 %}
{# Assign page_title attribute to a variable: #}
{% page_attribute "page_title" as title %}
Keyword arguments:
field-name -- the name of the field to output. Use one of:
- title
- menu_title
- page_title
- slug
- meta_description
- meta_keywords
page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
for the page_lookup argument.
varname -- context variable name. Output will be added to template context as this variable.
This argument is required to follow the 'as' keyword.
"""
name = 'page_attribute'
options = Options(
Argument('name', resolve=False),
Argument('page_lookup', required=False, default=None),
'as',
Argument('varname', required=False, resolve=False)
)
valid_attributes = [
"title",
"slug",
"meta_description",
"meta_keywords",
"page_title",
"menu_title"
]
def get_value(self, context, name, page_lookup):
if not 'request' in context:
return ''
name = name.lower()
request = context['request']
lang = get_language_from_request(request)
page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
if page == "dummy":
return ''
if page and name in self.valid_attributes:
func = getattr(page, "get_%s" % name)
return escape(func(language=lang, fallback=True))
return ''
register.tag(PageAttribute)
class CleanAdminListFilter(InclusionTag):
template = 'admin/filter.html'
name = 'clean_admin_list_filter'
options = Options(
Argument('cl'),
Argument('spec'),
)
def get_context(self, context, cl, spec):
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title(), 'choices': unique_choices}
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
site=None, cache_result=True):
"""
Shows the content of a page with a placeholder name and given lookup
arguments in the given language.
This is useful if you want to have some more or less static content that is
shared among many pages, such as a footer.
See _get_page_by_untyped_arg() for detailed information on the allowed types
and their interpretation for the page_lookup argument.
"""
validate_placeholder_name(placeholder_name)
request = context.get('request', False)
site_id = get_site_id(site)
if not request:
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
content = None
if cache_result:
base_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)
cache_key = _clean_key('%s_placeholder:%s' % (base_key, placeholder_name))
content = cache.get(cache_key)
if not content:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if not page:
return {'content': ''}
try:
placeholder = page.placeholders.get(slot=placeholder_name)
except PlaceholderModel.DoesNotExist:
if settings.DEBUG:
raise
return {'content': ''}
content = render_placeholder(placeholder, context, placeholder_name)
if cache_result:
cache.set(cache_key, content, get_cms_setting('CACHE_DURATIONS')['content'])
if content:
return {'content': mark_safe(content)}
return {'content': ''}
class ShowPlaceholderById(InclusionTag):
template = 'cms/content.html'
name = 'show_placeholder_by_id'
options = Options(
Argument('placeholder_name'),
Argument('reverse_id'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, *args, **kwargs):
return _show_placeholder_for_page(**self.get_kwargs(*args, **kwargs))
def get_kwargs(self, context, placeholder_name, reverse_id, lang, site):
return {
'context': context,
'placeholder_name': placeholder_name,
'page_lookup': reverse_id,
'lang': lang,
'site': site
}
register.tag(ShowPlaceholderById)
register.tag('show_placeholder', ShowPlaceholderById)
class ShowUncachedPlaceholderById(ShowPlaceholderById):
name = 'show_uncached_placeholder_by_id'
def get_kwargs(self, *args, **kwargs):
kwargs = super(ShowUncachedPlaceholderById, self).get_kwargs(*args, **kwargs)
kwargs['cache_result'] = False
return kwargs
register.tag(ShowUncachedPlaceholderById)
register.tag('show_uncached_placeholder', ShowUncachedPlaceholderById)
class CMSToolbar(InclusionTag):
template = 'cms/toolbar/toolbar.html'
name = 'cms_toolbar'
def render(self, context):
request = context.get('request', None)
if not request:
return ''
toolbar = getattr(request, 'toolbar', None)
if not toolbar:
return ''
if not toolbar.show_toolbar:
return ''
return super(CMSToolbar, self).render(context)
def get_context(self, context):
context['CMS_TOOLBAR_CONFIG'] = context['request'].toolbar.as_json(context)
return context
register.tag(CMSToolbar)
|
|
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Graeme Gange <ggange@csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import unicode_literals
import re
from collections import defaultdict
from nltk.ccg.api import PrimitiveCategory, Direction, CCGVar, FunctionalCategory
from nltk.compat import python_2_unicode_compatible
#------------
# Regular expressions used for parsing components of the lexicon
#------------
# Parses a primitive category and subscripts
rePrim = re.compile(r'''([A-Za-z]+)(\[[A-Za-z,]+\])?''')
# Separates the next primitive category from the remainder of the
# string
reNextPrim = re.compile(r'''([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)''')
# Separates the next application operator from the remainder
reApp = re.compile(r'''([\\/])([.,]?)([.,]?)(.*)''')
# Parses the definition of the category of either a word or a family
reLex = re.compile(r'''([A-Za-z_]+)\s*(::|[-=]+>)\s*(.+)''')
# Strips comments from a line
reComm = re.compile('''([^#]*)(?:#.*)?''')
#----------
# Lexicons
#----------
@python_2_unicode_compatible
class CCGLexicon(object):
'''
Class representing a lexicon for CCG grammars.
primitives - The list of primitive categories for the lexicon
families - Families of categories
entries - A mapping of words to possible categories
'''
def __init__(self, start, primitives, families, entries):
self._start = PrimitiveCategory(start)
self._primitives = primitives
self._families = families
self._entries = entries
# Returns all the possible categories for a word
def categories(self, word):
return self._entries[word]
# Returns the target category for the parser
def start(self):
return self._start
# String representation of the lexicon
# Used for debugging
def __str__(self):
st = ""
first = True
for ident in self._entries:
if not first:
st = st + "\n"
st = st + ident + " => "
first = True
for cat in self._entries[ident]:
if not first:
st = st + " | "
else:
first = False
st = st + "%s" % cat
return st
#-----------
# Parsing lexicons
#-----------
# Separates the contents matching the first set of brackets
# from the rest of the input.
def matchBrackets(string):
rest = string[1:]
inside = "("
while rest != "" and not rest.startswith(')'):
if rest.startswith('('):
(part, rest) = matchBrackets(rest)
inside = inside + part
else:
inside = inside + rest[0]
rest = rest[1:]
if rest.startswith(')'):
return (inside + ')', rest[1:])
raise AssertionError('Unmatched bracket in string \'' + string + '\'')
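# A quick worked example, assuming a well-formed category string:
#   matchBrackets('(S\\NP)/NP')  =>  ('(S\\NP)', '/NP')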
# Separates the string for the next portion of the category
# from the rest of the string
def nextCategory(string):
if string.startswith('('):
return matchBrackets(string)
return reNextPrim.match(string).groups()
# Parses an application operator
def parseApplication(app):
return Direction(app[0], app[1:])
# Parses the subscripts for a primitive category
def parseSubscripts(subscr):
if subscr:
return subscr[1:-1].split(',')
return []
# Parse a primitive category
def parsePrimitiveCategory(chunks, primitives, families, var):
# If the primitive is the special category 'var',
# replace it with the correct CCGVar
if chunks[0] == "var":
if chunks[1] is None:
if var is None:
var = CCGVar()
return (var, var)
catstr = chunks[0]
if catstr in families:
(cat, cvar) = families[catstr]
if var is None:
var = cvar
else:
cat = cat.substitute([(cvar, var)])
return (cat, var)
if catstr in primitives:
subscrs = parseSubscripts(chunks[1])
return (PrimitiveCategory(catstr, subscrs), var)
raise AssertionError('String \'' + catstr + '\' is neither a family nor primitive category.')
# parseCategory drops the 'var' from the tuple
def parseCategory(line, primitives, families):
return augParseCategory(line, primitives, families)[0]
# Parses a string representing a category, and returns
# a tuple with (possibly) the CCG variable for the category
def augParseCategory(line, primitives, families, var=None):
(str, rest) = nextCategory(line)
if str.startswith('('):
(res, var) = augParseCategory(str[1:-1], primitives, families, var)
else:
# print rePrim.match(str).groups()
(res, var) = parsePrimitiveCategory(rePrim.match(str).groups(),
primitives, families, var)
while rest != "":
app = reApp.match(rest).groups()
dir = parseApplication(app[0:3])
rest = app[3]
(str, rest) = nextCategory(rest)
if str.startswith('('):
(arg, var) = augParseCategory(str[1:-1], primitives, families, var)
else:
(arg, var) = parsePrimitiveCategory(rePrim.match(str).groups(), primitives, families, var)
res = FunctionalCategory(res, arg, dir)
return (res, var)
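# Illustrative call, assuming primitives ['S', 'NP'] and no families:
#   parseCategory('(S\\NP)/NP', ['S', 'NP'], {})
# builds a FunctionalCategory equivalent to (S\NP)/NP, i.e. a category that
# takes an NP argument to its right and yields S\NP.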
# Takes an input string, and converts it into a lexicon for CCGs.
def parseLexicon(lex_str):
primitives = []
families = {}
entries = defaultdict(list)
for line in lex_str.splitlines():
# Strip comments and leading/trailing whitespace.
line = reComm.match(line).groups()[0].strip()
if line == "":
continue
if line.startswith(':-'):
# A line of primitive categories.
# The first line is the target category
# ie, :- S, N, NP, VP
primitives = primitives + [ prim.strip() for prim in line[2:].strip().split(',') ]
else:
# Either a family definition, or a word definition
(ident, sep, catstr) = reLex.match(line).groups()
(cat, var) = augParseCategory(catstr, primitives, families)
if sep == '::':
# Family definition
# ie, Det :: NP/N
families[ident] = (cat, var)
else:
# Word definition
# ie, which => (N\N)/(S/NP)
entries[ident].append(cat)
return CCGLexicon(primitives[0], primitives, families, entries)
openccg_tinytiny = parseLexicon('''
# Rather minimal lexicon based on the openccg `tinytiny' grammar.
# Only incorporates a subset of the morphological subcategories, however.
:- S,NP,N # Primitive categories
Det :: NP/N # Determiners
Pro :: NP
IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular)
IntransVpl :: S\\NP[pl] # Plural
TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular)
TransVpl :: S\\NP[pl]/NP # Plural
the => NP[sg]/N[sg]
the => NP[pl]/N[pl]
I => Pro
me => Pro
we => Pro
us => Pro
book => N[sg]
books => N[pl]
peach => N[sg]
peaches => N[pl]
policeman => N[sg]
policemen => N[pl]
boy => N[sg]
boys => N[pl]
sleep => IntransVsg
sleep => IntransVpl
eat => IntransVpl
eat => TransVpl
eats => IntransVsg
eats => TransVsg
see => TransVpl
sees => TransVsg
''')
|
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
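# Worked toy example of scaled_cost (purely illustrative values):
#   x = [0.5, 0.0, 1.0], t = [0.0, 0.0, 2.0]  ->  sq_error = [0.25, 0.0, 1.0]
#   mean over t > THRESHOLD = 1.0;  mean over t <= THRESHOLD = 0.125
#   scaled_cost = (1.0 + 0.125) / 2.0 = 0.5625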
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[1, 0.5, 2, 10, 10],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1520,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
input_padding=1,
include_diff=False,
clip_appliance_power=False
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
raise
# import ipdb; ipdb.set_trace()  # uncomment to drop into a debugger on errors
if __name__ == "__main__":
main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
_OPTIONAL_OPS = frozenset([
"OptionalFromValue", "OptionalNone", "OptionalHasValue", "OptionalGetValue"
])
class CondV2Test(test.TestCase):
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
if not feed_dict:
feed_dict = {}
with self.session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(
array_ops.squeeze_v2(pred), true_fn, false_fn, name="expected")
actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
sess_run_args = {pred: True}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: [[True]]}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: False}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: [[False]]}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
@test_util.run_deprecated_v1
def testBasic(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * 2.0
def false_fn():
return y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testReturnsIndexedSlicesAndNones(self):
@def_function.function
def build_cond_with_indexed_slices():
pred = constant_op.constant(True)
def true_fn():
return math_ops._as_indexed_slices(constant_op.constant([1.])), None
def false_fn():
return math_ops._as_indexed_slices(constant_op.constant([2.])), None
result = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertIsNone(result[1])
return ops.convert_to_tensor(result[0])
output = build_cond_with_indexed_slices()
self.assertAllEqual(output, [1.])
def testReturnsNonesAndIndexedSlices(self):
@def_function.function
def build_cond_with_indexed_slices():
pred = constant_op.constant(True)
def true_fn():
return (None, None, None,
math_ops._as_indexed_slices(constant_op.constant([1.])))
def false_fn():
return (None, None, None,
math_ops._as_indexed_slices(constant_op.constant([2.])))
result = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertIsNone(result[0])
self.assertIsNone(result[1])
self.assertIsNone(result[2])
return ops.convert_to_tensor(result[3])
output = build_cond_with_indexed_slices()
self.assertAllEqual(output, [1.])
def testExternalControlDependencies(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
def true_branch():
with ops.control_dependencies([op]):
return 1.0
cond_v2.cond_v2(array_ops.placeholder_with_default(False, None),
true_branch,
lambda: 2.0).eval()
self.assertAllEqual(self.evaluate(v), 2.0)
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return x, y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testBasic2(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * y * 2.0
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNoInputs(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
def true_fn():
return constant_op.constant(1.0)
def false_fn():
return constant_op.constant(2.0)
out = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertEqual(sess.run(out, {pred: True}), (1.0,))
self.assertEqual(sess.run(out, {pred: False}), (2.0,))
def _createCond(self, name):
"""Creates a cond_v2 call and returns the output tensor and the cond op."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "StatelessIf")
return output, cond_op
def _createNestedCond(self, name):
"""Like _createCond but creates a nested cond_v2 call as well."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return cond_v2.cond_v2(pred, lambda: x, lambda: x + 1)
def false_fn():
return x + 2
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "StatelessIf")
return output, cond_op
def testDefaultName(self):
with ops.Graph().as_default():
_, cond_op = self._createCond(None)
self.assertEqual(cond_op.name, "cond")
self.assertRegexpMatches(
cond_op.get_attr("then_branch").name, r"cond_true_\d*")
self.assertRegexpMatches(
cond_op.get_attr("else_branch").name, r"cond_false_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
_, cond1_op = self._createCond("")
self.assertEqual(cond1_op.name, "foo/cond")
self.assertRegexpMatches(
cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
self.assertRegexpMatches(
cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
_, cond2_op = self._createCond(None)
self.assertEqual(cond2_op.name, "foo/cond_1")
self.assertRegexpMatches(
cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
@test_util.run_v2_only
def testInheritParentNameScope(self):
@def_function.function
def f():
with ops.name_scope("foo"):
def then_branch():
with ops.name_scope("then"):
actual_name_scope = ops.get_name_scope()
expected_name_scope = "foo/cond/then"
self.assertEqual(actual_name_scope, expected_name_scope)
return 0.
def else_branch():
with ops.name_scope("else"):
actual_name_scope = ops.get_name_scope()
expected_name_scope = "foo/cond/else"
self.assertEqual(actual_name_scope, expected_name_scope)
return 0.
return cond_v2.cond_v2(
constant_op.constant(True), then_branch, else_branch)
f()
@test_util.run_v1_only("b/120545219")
def testDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
return x * y * 2.0
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
return x * y * 2.0
return nested_fn()
return fn()
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
@test_util.run_deprecated_v1
def testDoubleNestedDefunInCond(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
@function.defun
def nested_nested_fn():
return x * y * 2.0
return nested_nested_fn()
return nested_fn()
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedCond(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
return x * y * 2.0
def false_false_fn():
return x * 5.0
return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testNestedCondBothBranches(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return _cond(pred, lambda: x + y, lambda: x * x, name=None)
def false_fn():
return _cond(pred, lambda: x - y, lambda: y * y, name=None)
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testDoubleNestedCond(self):
def run_test(pred1_value, pred2_value):
def build_graph():
pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
def false_true_true_fn():
return x * y * 2.0
def false_true_false_fn():
return x * 10.0
return _cond(
pred1,
false_true_true_fn,
false_true_false_fn,
name="inside_false_true_fn")
def false_false_fn():
return x * 5.0
return _cond(
pred2, false_true_fn, false_false_fn, name="inside_false_fn")
return x, y, pred1, pred2, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [y], {
pred1: pred1_value,
pred2: pred2_value
})
run_test(True, True)
run_test(True, False)
run_test(False, False)
run_test(False, True)
def testGradientFromInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testGradientFromInsideNestedDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
@function.defun
def inner_nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
return inner_nesting_fn()
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
# Build cond and its gradient inside a Defun.
@function.defun
def fn():
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
return gradients_impl.gradients(cond_outer, [x, y])
grads = fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default(), self.session(
graph=ops.get_default_graph()) as sess:
grads, pred_outer, pred_inner = build_graph()
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
@test_util.run_deprecated_v1
def testSecondDerivative(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
cond_grad = gradients_impl.gradients(cond, [x])
cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testGradientOfDeserializedCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
ops.add_to_collection("x", x)
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
ops.add_to_collection("pred", pred)
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
ops.add_to_collection("cond", cond)
meta_graph = saver.export_meta_graph()
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
saver.import_meta_graph(meta_graph)
x = ops.get_collection("x")[0]
pred = ops.get_collection("pred")[0]
cond = ops.get_collection("cond")
cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
cond_grad_grad = gradients_impl.gradients(
cond_grad, [x], name="cond_grad_grad")
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
@test_util.run_deprecated_v1
def testFuncCond(self):
@def_function.function
def fn_with_cond():
cond_v2.cond_v2(
constant_op.constant(True),
lambda: array_ops.zeros([]),
lambda: array_ops.ones([]),
name="cond_1")
return cond_v2.cond_v2(
constant_op.constant(False),
lambda: array_ops.zeros([]),
lambda: array_ops.ones([]),
name="cond_2")
concrete_fn = fn_with_cond.get_concrete_function()
cond_1 = concrete_fn.graph.get_operation_by_name("cond_1")
cond_2 = concrete_fn.graph.get_operation_by_name("cond_2")
# Verify that all functional ops are stateless and cond_2 does not have
# any control inputs.
self.assertEqual(cond_1.type, "StatelessIf")
self.assertEqual(cond_2.type, "StatelessIf")
self.assertLen(cond_2.control_inputs, 0)
fn_output = concrete_fn()
self.assertEqual(fn_output.op.type, "PartitionedCall")
self.assertAllEqual(fn_output, 1.0)
@test_util.run_deprecated_v1
def testFuncCondFunc(self):
@def_function.function
def fn_with_cond():
cond_v2.cond_v2(
constant_op.constant(True),
lambda: constant_op.constant(1.),
lambda: constant_op.constant(2.),
name="cond_1")
@def_function.function
def true_branch():
return constant_op.constant(3.)
return cond_v2.cond_v2(
constant_op.constant(True),
true_branch,
lambda: constant_op.constant(4.),
name="cond_2")
concrete_fn = fn_with_cond.get_concrete_function()
cond_1 = concrete_fn.graph.get_operation_by_name("cond_1")
cond_2 = concrete_fn.graph.get_operation_by_name("cond_2")
# Verify that all functional ops are stateless and cond_2 does not have
# any control inputs.
self.assertEqual(cond_1.type, "StatelessIf")
self.assertEqual(cond_2.type, "StatelessIf")
self.assertLen(cond_2.control_inputs, 0)
cond_2_true_graph, _ = cond_v2.get_func_graphs(cond_2)
cond_2_true_graph_operations = cond_2_true_graph.get_operations()
self.assertEmpty([
op for op in cond_2_true_graph_operations
if op.type == "StatefulPartitionedCall"
])
self.assertLen([
op for op in cond_2_true_graph_operations
if op.type == "PartitionedCall"
], 1)
fn_output = concrete_fn()
self.assertEqual(fn_output.op.type, "PartitionedCall")
self.assertAllEqual(fn_output, 3.0)
@test_util.run_deprecated_v1
def testFuncCondWithVariable(self):
v1 = variables.Variable(2.)
v2 = variables.Variable(4.)
self.evaluate(variables.global_variables_initializer())
def update_v1():
v1.assign(v1)
return v1
def update_v2():
v2.assign(v2)
return v2
@def_function.function
def fn_with_cond():
cond_v2.cond_v2(
constant_op.constant(True),
update_v1,
lambda: constant_op.constant(0.),
name="cond_1")
cond_2 = cond_v2.cond_v2(
constant_op.constant(False),
lambda: constant_op.constant(0.),
update_v1,
name="cond_2")
cond_v2.cond_v2(
constant_op.constant(True),
update_v2,
lambda: constant_op.constant(0.),
name="cond_3")
cond_4 = cond_v2.cond_v2(
constant_op.constant(False),
lambda: constant_op.constant(0.),
lambda: v2,
name="cond_4")
stateless_cond = cond_v2.cond_v2(
constant_op.constant(False),
lambda: constant_op.constant(5.),
lambda: constant_op.constant(6.),
name="stateless_cond")
return cond_2, cond_4, stateless_cond
concrete_fn = fn_with_cond.get_concrete_function()
cond_1 = concrete_fn.graph.get_operation_by_name("cond_1")
cond_2 = concrete_fn.graph.get_operation_by_name("cond_2")
cond_3 = concrete_fn.graph.get_operation_by_name("cond_3")
cond_4 = concrete_fn.graph.get_operation_by_name("cond_4")
stateless_cond = concrete_fn.graph.get_operation_by_name("stateless_cond")
self.assertEqual(cond_1.type, "If")
self.assertEqual(cond_2.type, "If")
self.assertEqual(cond_3.type, "If")
self.assertEqual(cond_4.type, "If")
self.assertEqual(stateless_cond.type, "StatelessIf")
self.assertEmpty(cond_1.control_inputs)
self.assertLen(cond_2.control_inputs, 1)
self.assertIs(cond_2.control_inputs[0], cond_1)
self.assertEmpty(cond_3.control_inputs)
self.assertLen(cond_4.control_inputs, 1)
self.assertIs(cond_4.control_inputs[0], cond_3)
# Does not touch any variable so should not have any control inputs.
self.assertEmpty(stateless_cond.control_inputs)
fn_output = concrete_fn()
self.assertEqual(fn_output[0].op.type, "StatefulPartitionedCall")
self.assertAllEqual(self.evaluate(fn_output), [2.0, 4.0, 6.0])
@test_util.run_deprecated_v1
def testFuncCondFuncWithVariable(self):
v1 = variables.Variable(2.)
v2 = variables.Variable(4.)
self.evaluate(variables.global_variables_initializer())
@def_function.function
def fn_with_cond():
def update_v1():
v1.assign(v1)
return v1
def update_v2():
v2.assign(v2)
return v2
cond_v2.cond_v2(
constant_op.constant(True),
update_v1,
lambda: constant_op.constant(0.),
name="cond_1")
cond_2 = cond_v2.cond_v2(
constant_op.constant(False),
lambda: constant_op.constant(0.),
update_v1,
name="cond_2")
cond_v2.cond_v2(
constant_op.constant(True),
update_v2,
lambda: constant_op.constant(0.),
name="cond_3")
@def_function.function
def cond_4_false_branch():
v2.assign(v2)
return v2
cond_4 = cond_v2.cond_v2(
constant_op.constant(False),
lambda: constant_op.constant(0.),
cond_4_false_branch,
name="cond_4")
return cond_2, cond_4
concrete_fn = fn_with_cond.get_concrete_function()
cond_1 = concrete_fn.graph.get_operation_by_name("cond_1")
cond_2 = concrete_fn.graph.get_operation_by_name("cond_2")
cond_3 = concrete_fn.graph.get_operation_by_name("cond_3")
cond_4 = concrete_fn.graph.get_operation_by_name("cond_4")
self.assertEqual(cond_1.type, "If")
self.assertEqual(cond_2.type, "If")
self.assertEqual(cond_3.type, "If")
self.assertEqual(cond_4.type, "If")
self.assertEmpty(cond_1.control_inputs)
self.assertLen(cond_2.control_inputs, 1)
self.assertIs(cond_2.control_inputs[0], cond_1)
self.assertEmpty(cond_3.control_inputs)
self.assertLen(cond_4.control_inputs, 1)
self.assertIs(cond_4.control_inputs[0], cond_3)
_, cond_4_false_graph = cond_v2.get_func_graphs(cond_4)
cond_4_false_graph_operations = cond_4_false_graph.get_operations()
self.assertEmpty([
op for op in cond_4_false_graph_operations
if op.type == "PartitionedCall"
])
self.assertLen([
op for op in cond_4_false_graph_operations
if op.type == "StatefulPartitionedCall"
], 1)
fn_output = concrete_fn()
self.assertEqual(fn_output[0].op.type, "StatefulPartitionedCall")
self.assertAllEqual(self.evaluate(fn_output), [2.0, 4.0])
def testGradientTapeOfCondWithResourceVariableInFunction(self):
with context.eager_mode():
v = variables.Variable(2.)
@def_function.function
def fn_with_cond():
with backprop.GradientTape() as tape:
pred = constant_op.constant(True, dtype=dtypes.bool)
def true_fn():
return math_ops.pow(v, 3)
def false_fn():
return v
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
return tape.gradient(cond, v)
self.assertAllEqual(fn_with_cond(), 12.0)
def testLowering(self):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cond_output, _ = self._createCond("cond")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# If lowering was enabled, there should be a `Switch` node
self.assertTrue(
_has_node_with_op(run_metadata, "Switch"),
"A `Switch` op should exist if the graph was lowered.")
# If lowering was enabled, there should be no `If` node
self.assertFalse(
_has_node_with_op(run_metadata, "StatelessIf"),
"An `If` op was found, but it should be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledInXLA(self):
with self.session(graph=ops.Graph()) as sess:
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
cond_output, cond_op = self._createCond("cond")
xla_context.Exit()
# Check lowering attr is not set.
with self.assertRaises(ValueError):
cond_op.get_attr("_lower_using_switch_merge")
# Check the actual graph that is run.
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(cond_output, options=run_options, run_metadata=run_metadata)
# Lowering disabled in XLA, there should be no `Switch` node
self.assertFalse(
_has_node_with_op(run_metadata, "Switch"),
"A `Switch` op exists, but the graph should not be lowered.")
if test_util.is_xla_enabled():
# If XLA is actually enabled then we expect the StatelessIf to have been
# put inside an XLA cluster.
self.assertFalse(
_has_node_with_op(run_metadata, "StatelessIf"),
("A `StatelessIf` op was found, but the node should have been " +
"clustered."))
self.assertTrue(
_has_node_with_op(run_metadata, "_XlaCompile"),
("An `_XlaCompile` op was not found, but the `StatelessIf` (at " +
"least) op should have been clustered."))
self.assertTrue(
_has_node_with_op(run_metadata, "_XlaRun"),
("An `_XlaRun` op was not found, but the `StatelessIf` (at " +
"least) op should have been clustered."))
else:
# Lowering disabled in XLA, there should still be an `If` node
self.assertTrue(
_has_node_with_op(run_metadata, "StatelessIf"),
("A `StatelessIf` op was not found, but the graph should not be " +
"lowered."))
@test_util.run_deprecated_v1
def testNestedLoweringDisabledInXLA(self):
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
_, cond_op = self._createNestedCond("cond")
xla_context.Exit()
# Check lowering attr is not set for either If node.
with self.assertRaises(ValueError):
cond_op.get_attr("_lower_using_switch_merge")
nested_if_ops = []
for func in ops.get_default_graph()._functions.values():
nested_if_ops.extend(
op for op in func.graph.get_operations() if op.type == "StatelessIf")
self.assertEqual(len(nested_if_ops), 1)
with self.assertRaises(ValueError):
nested_if_ops[0].get_attr("_lower_using_switch_merge")
# TODO(skyewm): check the actual graphs that are run once we have a way to
# programmatically access those graphs.
# b/131355614
@test_util.run_deprecated_v1
def testNoOptionalsInXla(self):
@def_function.function
def func_with_cond():
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
intermediate = x + 1
return intermediate * x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn)
grad = gradients_impl.gradients(output, x)[0]
forward_if_op = output.op.inputs[0].op
gradient_if_op = grad.op.inputs[0].op
def verify_no_optional_ops(op, branch_name):
branch_function = ops.get_default_graph()._get_function(
op.get_attr(branch_name).name)
function_def = branch_function.definition
for node_def in function_def.node_def:
self.assertNotIn(node_def.op, _OPTIONAL_OPS)
verify_no_optional_ops(forward_if_op, "then_branch")
verify_no_optional_ops(forward_if_op, "else_branch")
verify_no_optional_ops(gradient_if_op, "then_branch")
verify_no_optional_ops(gradient_if_op, "else_branch")
return grad
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
func_with_cond()
xla_context.Exit()
@test_util.run_deprecated_v1
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
with self.session(graph=ops.Graph()) as sess:
@function.defun
def _add_cond(x):
return cond_v2.cond_v2(
constant_op.constant(True, name="pred"),
lambda: x,
lambda: x + 1)
x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
out_cond = _add_cond(x)
# The fact that sess.run() succeeds means lowering is disabled, because
# the single threaded executor does not support cond v1 ops.
sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return ((x * y,), y)
def false_fn():
return ((x,), y * 3.0)
output = control_flow_ops.cond(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(self.evaluate(output[0][0]), 1.)
self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
TypeError, "true_fn and false_fn arguments to tf.cond must have the "
"same number, type, and overall structure of return values."):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
output_t = output.stack()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
@function.defun
def f():
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
return output.stack()
output_t = f()
self.assertAllEqual(output_t, [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
def true_fn():
y_plus_one = y + 1.
return x * y_plus_one
output = cond_v2.cond_v2(constant_op.constant(True), true_fn, lambda: x)
if_op = output.op.inputs[0].op
self.assertEqual(if_op.type, "StatelessIf")
# pylint: disable=g-deprecated-assert
self.assertEqual(len(if_op.outputs), 1)
gradients_impl.gradients(output, x)
# if_op should have been rewritten to output `y_plus_one`.
self.assertEqual(len(if_op.outputs), 2)
gradients_impl.gradients(output, x)
# Computing the gradient again shouldn't rewrite if_op again.
self.assertEqual(len(if_op.outputs), 2)
# pylint: enable=g-deprecated-assert
@test_util.run_deprecated_v1
def testDoNotAccumulateConstants(self):
x = constant_op.constant(1.0, name="x")
output = cond_v2.cond_v2(
constant_op.constant(True), lambda: x * 2.0, lambda: x)
if_op = output.op.inputs[0].op
self.assertEqual(if_op.type, "StatelessIf")
# pylint: disable=g-deprecated-assert
self.assertEqual(len(if_op.outputs), 1)
gradients_impl.gradients(output, x)
# Number of outputs does not change because
# 1. `x` is a loop input so does not need to be accumulated.
# 2. 2.0 is a constant so it is not accumulated.
self.assertEqual(len(if_op.outputs), 1)
gradients_impl.gradients(output, x)
# Computing the gradient again shouldn't rewrite if_op again.
self.assertEqual(len(if_op.outputs), 1)
# pylint: enable=g-deprecated-assert
class CondV2CollectionTest(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
return math_ops.multiply(x, z)
def false_fn():
z = math_ops.add(x, y)
return math_ops.multiply(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd.eval(), 14)
read_z_collection = ops.get_collection("z")
self.assertEquals(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
def testContainer(self):
"""Set containers outside & inside of cond_v2.
Make sure the containers are set correctly for both variable creation
(tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
"""
self.skipTest("b/113048653")
with ops.Graph().as_default() as g:
with self.session(graph=g):
v0 = variables.Variable([0])
q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)
def container(node):
return node.op.get_attr("container")
self.assertEqual(compat.as_bytes(""), container(v0))
self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))
def true_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2t"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2t"), container(v2))
self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(2.0)
def false_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2f"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2f"), container(v2))
self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(6.0)
with ops.container("l1"):
cnd_true = cond_v2.cond_v2(
constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd_true.eval(), 2)
cnd_false = cond_v2.cond_v2(
constant_op.constant(False), true_fn, false_fn)
self.assertEquals(cnd_false.eval(), 6)
v4 = variables.Variable([3])
q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v5 = variables.Variable([4])
q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v4))
self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
self.assertEqual(compat.as_bytes(""), container(v5))
self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
def testColocateWithBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)
def fn2():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
def testColocateWithInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn2():
with ops.colocate_with(b.op):
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant([2.0], name="d")
self.assertEqual([b"loc:@a"], d.op.colocation_groups())
def testColocateWithInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.device("/device:CPU:1"):
b = constant_op.constant([2.0], name="b")
def fn():
with ops.colocate_with(b.op):
c = math_ops.add(a, a, name="c")
return c
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
# We expect there to be two partitions because of the
# colocate_with. We are only running the cond, which has a data
# dependency on `a` but not on `b`. So, without the colocate_with
# we would expect execution on just one device.
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
def testDeviceBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
def fn():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
with ops.device("/device:CPU:0"):
self.assertIn(
compat.as_bytes("CPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn, fn)))
def fn2():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
if test_util.is_gpu_available():
with ops.device("/device:GPU:0"):
self.assertIn(
compat.as_bytes("GPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn2, fn2)))
else:
self.skipTest("Test requires a GPU to check GPU device placement.")
def testDeviceInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):
def fn2():
with ops.device("/device:CPU:1"):
c = constant_op.constant(3.0)
self.assertEqual("/device:CPU:1", c.op.device)
return c
with ops.device("/device:CPU:0"):
self.assertEquals(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant(4.0)
self.assertEqual("/device:CPU:0", d.op.device)
def testDeviceInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
def fn():
with ops.device("/device:CPU:1"):
c = math_ops.add(a, a, name="c")
return c
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
def _cond(pred, true_fn, false_fn, name):
if _is_old_cond():
return control_flow_ops.cond(pred, true_fn, false_fn, name=name)
else:
return cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
def _is_old_cond():
return isinstance(ops.get_default_graph()._get_control_flow_context(),
control_flow_ops.CondContext)
def _has_node_with_op(run_metadata, op_type):
"""Whether any node in `run_metadata.partition_graphs` matches `op_type`."""
for graph in run_metadata.partition_graphs:
for node in graph.node:
if node.op == op_type:
return True
return False
if __name__ == "__main__":
test.main()
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''
Utility functions for AFNI files
Created on Feb 19, 2012
@author: Nikolaas N. Oosterhof (nikolaas.oosterhof@unitn.it)
'''
import os, subprocess, time, datetime, collections
import os.path as op
def as_list(v):
'''makes this a singleton list if the input is not a list'''
if type(v) not in [list, tuple]:
v = [v]
return v
def afni_fileparts(fn):
'''File parts for AFNI filenames.
Returns a tuple with these four parts.
Also works for .nii files, in which case the third part is the empty string.
Not tested for other file types.
Parameters
----------
fn : str
whole filename, for example PATH/TO/FILE/NAME+orig.HEAD
Returns
-------
fullpath: str
PATH/TO/FILE
rootname: str
NAME
orientation: str
+orig
extensions: str
.HEAD
'''
tail, head = os.path.split(fn)
s = head.split('+')
name = s[0]
orient = '+' + s[1] if len(s) == 2 else ''
afniorients = ['+orig', '+tlrc', '+acpc']
ext = None
for a in afniorients:
if orient.startswith(a):
#ext=orient[len(a):]
orient = a
ext = ".HEAD"
#if ext=='.':
# ext=''
if ext is None:
s = name.split(".")
if len(s) > 1:
ext = "." + ".".join(s[1:])
name = s[0]
else:
ext = ''
return tail, name, orient, ext
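# Illustrative examples (not part of the original module; the paths are hypothetical)
# of how afni_fileparts splits a filename:
#   afni_fileparts('/data/subj1/anat+orig.HEAD') -> ('/data/subj1', 'anat', '+orig', '.HEAD')
#   afni_fileparts('func.nii.gz')                -> ('', 'func', '', '.nii.gz')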
def afni_fileexists(fn):
'''
Parameters
----------
fn : str
AFNI filename (possibly without .HEAD or .BRIK extension)
Returns
-------
bool
True iff fn exists as AFNI file
'''
p, n, o, e = afni_fileparts(fn)
if o:
return os.path.isfile('%s/%s%s.HEAD' % (p, n, o))
else:
return (e in ['.nii', '.nii.gz']) and os.path.isfile(fn)
def run_cmds(cmds, env=None, dryrun=False):
'''Execute a list of commands in the shell.'''
if env is None:
env = os.environ
# if cmds is just one command, make a singleton list
cmds = as_list(cmds)
# run each command
for cmd in cmds:
print("** Will execute the following commands:")
for c in cmd.split(';'):
print('** - %s' % c)
if not dryrun:
print("**>> Starting now:")
subprocess.check_call(cmd, env=env, shell=True)
print("**<< ... completed execution")
def cmd_capture_output(cmd, env=None):
if env is None:
env = os.environ
return subprocess.check_output(cmd, env=env, shell=True)
def which(f, env=None):
'''Finds the full path to a file in the path
Parameters
----------
f: str
Filename of executable
env (optional):
Environment in which path is found.
By default this is the environment in which python runs
Returns
-------
str
Full path of 'f' if 'f' is executable and in the path, 'f' itself
if 'f' is a path, None otherwise
'''
if env is None:
env = os.environ
def is_executable(fullpath):
return os.path.exists(fullpath) and os.access(fullpath, os.X_OK)
[p, n] = os.path.split(f)
if p:
return f
else:
for path in env['PATH'].split(os.pathsep):
fullfn = os.path.join(path, n)
if is_executable(fullfn):
return fullfn
return None
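# Illustrative example (hypothetical executable name): which('afni') returns the
# full path of the 'afni' binary if it is found on $PATH, returns the argument
# unchanged if it already contains a directory component, and returns None otherwise.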
def _package_afni_nibabel_for_standalone(outputdir, rootname='python'):
'''
helper function to put mvpa2.support.{afni,nibabel} into another
directory (outputdir) where it can function as a stand-alone package
'''
outputdir_files = os.path.join(outputdir, rootname)
for d in (outputdir, outputdir_files):
if not os.path.exists(d):
os.mkdir(d)
fullpath = op.realpath(__file__)
fullpath_parts = fullpath.split('/')
if (len(fullpath_parts) < 4 or
fullpath.split('/')[-4:-1] != ['mvpa2', 'support', 'afni']):
raise ValueError('This script is not in mvpa2.support.afni. '
'Packaging for stand-alone is not supported')
# map Python-2-style 'print' statements to function-call form when packaging
replacements = {'def warning(x): print x': 'def warning(x): print(x)'}
rootdir = os.path.join(op.split(fullpath)[0], '..')
parent_pkg = 'mvpa2.support'
pkgs = ['afni', 'nibabel']
srcdirs = [os.path.join(rootdir, pkg) for pkg in pkgs]
input_path_fns = [os.path.join(d, f) for d in srcdirs
for f in os.listdir(d)
]
is_python_file = lambda fn: fn.endswith('.py') and not fn.endswith('__.py')
input_path_fns = list(filter(is_python_file, input_path_fns))
print(input_path_fns)
outputfns = []
for path_fn in input_path_fns:
fn = os.path.split(path_fn)[1]
with open(path_fn) as f:
lines = f.read().split('\n')
newlines = []
for line in lines:
newline = None
for old, new in replacements.items():
line = line.replace(old, new)
if 'import' in line:
words = line.split()
for pkg in pkgs:
full_pkg = parent_pkg + '.' + pkg
trgwords = ['from', full_pkg, 'import']
n = len(trgwords)
if len(words) >= n and words[:n] == trgwords:
# find how many trailing spaces
i = 0
while line.find(' ', i) == i:
i += 1
# get everything from import to end of line
# with enough spaces in front
newline = (' ' * i) + ' '.join(words[(n - 1):])
#print line
#print ' -> ', newline
break
else:
if pkg in words:
print("Not supported in %s: %s" % (path_fn, line))
newline=False
break
if newline is False:
newlines=[]
break
if newline is None:
newline = line
newlines.append(newline)
if not len(newlines):
print "skipping %s" % fn
continue
if fn.startswith('lib_'):
repls = [('lib_', 'pymvpa2-'), ('.py', ''), ('_', '-')]
srcbinfn = fn
for src, trg in repls:
srcbinfn = srcbinfn.replace(src, trg)
parentfn = os.path.join(rootdir, '..', '..', 'bin', srcbinfn)
print(parentfn)
if os.path.exists(parentfn):
with open(parentfn) as pf:
plines = pf.read().split('\n')
in_main = False
for line in plines:
if '__main__' in line:
in_main = True
if in_main:
newlines.append(line)
newlines = [plines[0]] + newlines
else:
raise ValueError("not found: %s" % parentfn)
trgfn = os.path.join(outputdir_files, fn.replace('lib_', ''))
else:
trgfn = op.join(outputdir_files, fn)
with open(trgfn, 'w') as f:
f.write('\n'.join(newlines))
is_executable = newlines[0].startswith('#!')
chmod_ = 0o777 if is_executable else 0o666
os.chmod(trgfn, chmod_)
print "Written file %s in %s" % (fn, outputdir_files)
outputfns.append(os.path.split(trgfn)[1])
readme = ('''
AFNI I/O and wrapper functions in python
Copyright 2010-2014 Nikolaas N. Oosterhof <nikolaas.oosterhof@unitn.it>
The software in the following files is covered under the MIT License
(included below):
''' +
'\n'.join(map(lambda x:' - ' + x, outputfns)) +
'''
Parts of this software is or will be included in PyMVPA.
For information see http://www.pymvpa.org.
-------------------------------------------------------------------------
The MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
''')
readmefn = op.join(outputdir_files, 'COPYING')
with open(readmefn, 'w') as f:
f.write(readme)
|
|
"""Defines the main menu module for a program that inputs a MemberHub directory dump,
a school roster, and a hub map to perform analyses on the MemberHub directory.
"""
import roster_tools
import hub_map_tools
import family
# import person
import roster
def FindMissingEmail(arg_list):
"""menu.FindMissingEmail
INPUTS:
- directory -- list containing the MemberHub directory families
- map_d -- dictionary mapping teacher names to hub IDs
OUTPUTS:
Prints to standard output statistics about families with and without
email addresses, and gives the option to display the lists.
ASSUMPTIONS:
None.
"""
##
## extract copies of the arguments so they are not accidentally modified,
## and initialize method variables
directory = arg_list[0].copy()
hub_map_d = arg_list[1].copy()
total_adult_count = 0
no_email_person = []
no_email_family = []
partial_family = []
##
## create a dictionary of classroom hubs that will hold lists of
## adults without emails
map_d = hub_map_tools.CreateEmptyHubDictionary(hub_map_d)
##
## loop over all the families in the directory
for entry_family in directory:
##
## add the number of adults in this family to the count of all the adults
## in the directory
total_adult_count += len(entry_family.adults)
##
## for each family, count number of adults without emails
this_family_no_email_count = 0
##
## loop over each adult in the family
for adult in entry_family.adults:
##
## check whether this adult DOES NOT list an email address
if adult.DoesNotListEmailAddress():
##
## found adult without an email, so add to list of persons without email
no_email_person.append(adult)
##
## increment the number of adults in this family without email
this_family_no_email_count += 1
##
## loop over all the adult's hubs, and add them to the hub
## dictionary
for hub in adult.hubs:
if hub in map_d.keys():
map_d[hub].append(adult)
##
## if this family's no email count is the same as number of adults,
## then append this family to the no_email_family list
if this_family_no_email_count == len(entry_family.adults):
no_email_family.append(entry_family)
##
## otherwise, if only some (but not all) of the adults in the family lack an email,
## then append this family to the partial_family list
elif this_family_no_email_count > 0:
partial_family.append(entry_family)
return total_adult_count, no_email_person, no_email_family, partial_family, map_d
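##
## Illustrative (hypothetical) use of FindMissingEmail, assuming the directory list
## and hub map dictionary were loaded elsewhere:
##   total, no_email, no_email_fams, partial_fams, by_hub = \
##       FindMissingEmail([directory, hub_map_d])
##   print("%d of %d adults do not list an email address" % (len(no_email), total))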
def FindOrphans(directory):
"""menu.FindOrphans
INPUTS:
- directory -- list containing the MemberHub directory families
OUTPUTS:
Prints to standard output the children in the directory who do not have a
parent associated with their entry.
ASSUMPTIONS:
None.
"""
##
## make copy of the argument so it is not accidentally modified,
## and initialize method variables
local_dir = directory.copy()
orphan_families = []
##
## loop over all families in the directory to find orphan families
for entry_family in local_dir:
if entry_family.IsOrphan():
orphan_families.append(entry_family)
return orphan_families
def FindChildless(directory):
"""menu.FindChildless
INPUTS:
- directory -- list containing the MemberHub directory families
OUTPUTS:
Prints to standard output the adults in the directory who do not have a
child associated with their entry.
ASSUMPTIONS:
None.
"""
##
## make copy of the argument so it is not accidentally modified,
## and initialize method variables
local_dir = directory.copy()
childless_families = []
##
## loop over all families in the directory to find childless families
for entry_family in local_dir:
if entry_family.IsChildless():
childless_families.append(entry_family)
return childless_families
def FindHubless(arg_list):
"""menu.FindHubless
INPUTS:
- directory -- list containing the MemberHub directory families
- map_d -- dictionary mapping teacher names to hub IDs
OUTPUTS:
Prints to standard output the names in the directory who are not members of
at least one classroom hub.
ASSUMPTIONS:
None.
"""
##
## extract copies of the arguments so they are not accidentally modified,
## and initialize method variables
directory = arg_list[0].copy()
map_d = arg_list[1].copy()
hubless_adults = []
hubless_children = []
##
## loop over all the families to find any adults or children who are not
## in at least one classroom hub
for directory_family in directory:
for adult in directory_family.adults:
if not hub_map_tools.IsAnyHubClassroomHub(map_d, adult.hubs):
hubless_adults.append(adult)
for child in directory_family.children:
if not hub_map_tools.IsAnyHubClassroomHub(map_d, child.hubs):
hubless_children.append(child)
return hubless_adults, hubless_children
def FindChildrenInMultipleClassroom(arg_list):
"""menu.FindChildrenInMultipleClassroom
INPUTS:
- directory -- list containing the MemberHub directory families
- map_d -- dictionary mapping teacher names to hub IDs
OUTPUTS:
Prints to standard output the students in the directory who are members of
more than one classroom hub.
ASSUMPTIONS:
None.
"""
##
## extract copies of the arguments so they are not accidentally modified,
## and initialize method variables
directory = arg_list[0].copy()
map_d = arg_list[1].copy()
hubful_children = []
##
## loop over all the families in the directory to find children who are in
## more than one classroom hub
for directory_family in directory:
for child in directory_family.children:
if hub_map_tools.IsInMultipleClassroomHubs(map_d, child.hubs):
hubful_children.append(child)
return hubful_children
def FindAdultsWithoutAccounts(arg_list):
"""menu.FindAdultsWithoutAccounts
INPUTS:
- directory -- list of families from a MemberHub directory dump.
- hub_map -- the school's hub map
OUTPUTS:
Provides the option to write to standard output or to a file the list of adults who
do not have accounts, separated by whether their profile has an email address or not.
"""
##
## make copy of the argument so it is not accidentally modified,
## and initialize method variables
local_dir = arg_list[0].copy()
local_hub_map = arg_list[1].copy()
no_account_with_email = []
no_account_without_email = []
teacher_with_no_account = []
teacher_without_email = []
##
## create a dictionary of classroom hubs that will hold lists of
## adults without accounts
with_email_map = hub_map_tools.CreateEmptyHubDictionary(local_hub_map)
without_email_map = hub_map_tools.CreateEmptyHubDictionary(local_hub_map)
##
## loop over all the families in the directory, and find those with
## no accounts, and separate those between those with an email and those
## without an email.
for this_family in local_dir:
for this_adult in this_family.adults:
if this_adult.account_created == "":
if this_adult.DoesNotListEmailAddress():
if this_adult.IsWithSchool():
teacher_without_email.append(this_adult)
else:
no_account_without_email.append(this_adult)
for hub in this_adult.hubs:
if hub in without_email_map.keys():
without_email_map[hub].append(this_adult)
##
## this adult does have an email, so check if they are with the school
elif this_adult.IsWithSchool():
teacher_with_no_account.append(this_adult)
##
## this adult does have an email and is not with the school, so add to parents
## without accounts but with emails
else:
no_account_with_email.append(this_adult)
for hub in this_adult.hubs:
if hub in with_email_map.keys():
with_email_map[hub].append(this_adult)
return teacher_without_email, no_account_without_email, teacher_with_no_account, no_account_with_email, without_email_map, with_email_map
def PrintNotInDirectory(arg_list):
"""menu.PrintNotInDirectory
INPUTS:
- directory -- list containing the MemberHub directory families
- roster -- list containing the school roster families
OUTPUTS:
- entriless -- returns list of families in the school roster that could not
be found in the directory
Also prints to standard output the names in the school roster who are not in the
MemberHub directory.
ASSUMPTIONS:
None.
"""
##
## extract copies of the arguments so they are not accidentally modified,
## and initialize method variables
local_dir = arg_list[0].copy()
local_rost = arg_list[1].copy()
entriless = []
##
## loop over all the families in the roster...
for r_family in local_rost:
##
## ...to compare to each family in the directory
for d_family in local_dir:
##
## look for matches between roster and directory families
if d_family.IsSameFamily(r_family):
##
## once a family match is found, check whether the roster family has
## children who are not in the directory
if d_family.HasNewChildren(r_family):
temp_family = family.Family()
temp_family.FormFamilyWithNewChildren(d_family,r_family)
entriless.append(temp_family)
break
##
## if the roster family was not found in the directory, add it to list of
## families without directory entry
else:
entriless.append(r_family)
return entriless
def FindParentChildrenHubMismatches(directory):
"""menu.FindParentChildrenHubMismatches
INPUTS:
- directory -- list containing the MemberHub directory families
OUTPUTS:
- at user prompt, prints to standard output the family members and their
hubs that have adults who are not members of all their children's hubs
ASSUMPTIONS:
- None.
"""
##
## extract copies of the arguments so they are not accidentally modified,
## and initialize method variables
local_dir = directory.copy()
mismatches = []
##
## loop over all the families in the directory
for this_family in local_dir:
##
## accumulate all the family's children's hubs into one list
children_hubs = []
for this_child in this_family.children:
children_hubs.extend(this_child.hubs)
##
## next, accumulate list of adults who are not members of their
## children's hubs
for this_adult in this_family.adults:
for child_hub in children_hubs:
if child_hub not in this_adult.hubs:
mismatches.append(this_family)
break
return mismatches
def FindUnsedErrata(errata_file='roster_errata.csv', roster_file=None):
"""menu.FindUnsedErrata
INPUTS:
- errata_file -- (optional) errata CSV file name; defaults to 'roster_errata.csv'
- roster_file -- (optional) roster file name; defaults to the most recent roster file
OUTPUTS:
Returns the errata entries that are no longer found in the roster (and so can be
removed), along with the full errata dictionary.
ASSUMPTIONS:
- none
"""
##
## Read the adults from the most recent roster file
adults_list = roster_tools.ReadRosterAdultsFromMostRecent(file_name=roster_file)
##
## Next, instantiate a Roster class, which includes the default errata, and retrieve
## that dictionary
temp = roster.Roster(show_errors='y', file_name=errata_file)
all_errata = temp.GetErrata()
##
## for each error listed in the errata, look for it in the adults list
unused_errata = []
for entry in all_errata.keys():
if entry not in adults_list:
unused_errata.append(entry)
return unused_errata, all_errata
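##
## Illustrative (hypothetical) use of FindUnsedErrata with the default file names:
##   unused, all_errata = FindUnsedErrata()
##   for entry in unused:
##       print("Errata entry no longer needed:", entry)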
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Tintri, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from tintri.common import TintriServerError
from tintri.v310 import Tintri
from tintri.v310 import VirtualMachineFilterSpec
from tintri.v310 import VirtualMachineQoSConfig
from tintri.v310 import MultipleSelectionRequest
"""
This Python script configures QoS on the first 2 live VMs.
QoS configuration consists of minimum and maximum IOPs.
This script demonstrates multiple ways to modify the QoS.
Command usage: qos_config <server_name> <userName> <password> <min_iops> <max_iops>
"""
# For exhaustive messages on console, make it to True; otherwise keep it False
debug_mode = False
# Class to hold the VM name, UUID, and QOS information, min and max IOPs.
class VmQosInfo:
def __init__(self, name, uuid, min_value, max_value):
self.name = name
self.uuid = uuid
self.min_value = min_value # stored as integer
self.max_value = max_value # stored as integer
def get_name(self):
return self.name
def get_uuid(self):
return self.uuid
def get_min_qos(self):
return self.min_value
def get_max_qos(self):
return self.max_value
def set_min_qos(self, new_value):
self.min_value = new_value
def set_max_qos(self, new_value):
self.max_value = new_value
def __str__(self):
return ("VM name: " + self.name + " UUID: " + self.uuid +
" (" + str(self.min_value) + ", " + str(self.max_value) + ")")
# Helper print routines.
def print_with_prefix(prefix, out):
print(prefix + out)
return
def print_debug(out):
if debug_mode:
print_with_prefix("[DEBUG] : ", out)
return
def print_info(out):
print_with_prefix("[INFO] : ", out)
return
def print_error(out):
print_with_prefix("[ERROR] : ", out)
return
# Prints the previous ('was') and current ('now') QoS values.
def print_qos_info(vm, vm_info):
qos_config = vm_info.qosConfig
print(vm.get_name() + " was: " + str(vm.get_min_qos()) + ", " + str(vm.get_max_qos()))
print(vm.get_name() + " now: " + str(qos_config.minNormalizedIops) + ", " + str(qos_config.maxNormalizedIops))
# main
if len(sys.argv) < 6:
print("\nSets the first 2 VMs QoS values to the specified values, and then returns to the original values.\n")
print("Usage: " + sys.argv[0] + " server_name user_name password min_value, max_value\n")
sys.exit(-1)
server_name = sys.argv[1]
user_name = sys.argv[2]
password = sys.argv[3]
new_min_value = sys.argv[4]
new_max_value = sys.argv[5]
try:
# instantiate the Tintri server.
tintri = Tintri(server_name)
# Get version and product
version_info = tintri.version
product_name = version_info.productName
if (not tintri.is_vmstore()):
raise TintriServerError(0, -1, "Tintri server needs to be VMstore, not " + product_name)
preferredVersion = version_info.preferredVersion
print("API Version: " + preferredVersion)
    # Login to the VMstore
tintri.login(user_name, password)
except TintriServerError as tse:
print_error(tse.__str__())
sys.exit(2)
try:
vm_filter_spec = VirtualMachineFilterSpec()
vm_filter_spec.live = True
# Prime the VM information pump
vms = tintri.get_vms(filters = vm_filter_spec)
num_vms = vms.filteredTotal
if num_vms == 0:
raise TintriServerError(0, -2, "No live VMs present")
print_info(str(num_vms) + " live VMs present")
if num_vms < 2:
raise TintriServerError(0, -3, "Need at least 2 VMs")
# Create the first VM object.
vm1 = VmQosInfo(vms[0].vmware.name, vms[0].uuid.uuid,
vms[0].qosConfig.minNormalizedIops, vms[0].qosConfig.maxNormalizedIops)
# Create the second VM object.
vm2 = VmQosInfo(vms[1].vmware.name, vms[1].uuid.uuid,
vms[1].qosConfig.minNormalizedIops, vms[1].qosConfig.maxNormalizedIops)
new_min_qos = int(new_min_value)
new_max_qos = int(new_max_value)
# Show using Multi-selection Request
# Create new QoS object with the fields to be changed
modify_qos_info = VirtualMachineQoSConfig()
modify_qos_info.minNormalizedIops = new_min_qos
modify_qos_info.maxNormalizedIops = new_max_qos
# Create the MultipleSelectionRequest object
MS_Request = MultipleSelectionRequest()
MS_Request.ids = [vm1.get_uuid(), vm2.get_uuid()]
MS_Request.newValue = modify_qos_info
MS_Request.propertyNames = ["minNormalizedIops", "maxNormalizedIops"]
print_info("Changing min and max QOS values to (" + new_min_value + ", " + new_max_value + ")")
# Update the min and max IOPs using a multiple selection request.
tintri.update_vms_qos_config(request = MS_Request)
# Get VM 1 value to show that it changed.
vm1_info = tintri.get_vm(vm1.get_uuid())
print_qos_info(vm1, vm1_info)
# Get VM 2 value to show that it changed.
vm2_info = tintri.get_vm(vm2.get_uuid())
print_qos_info(vm2, vm2_info)
# Show using a list of VMs.
vm_uuids = [vm1.get_uuid(), vm2.get_uuid()]
print_info("Changing max QOS value plus 100")
# Update the max IOPs again using a list of VM UUIDs, and max_normalized_iops.
tintri.update_vms_qos_config(vm_uuids, max_normalized_iops = new_max_qos+100)
# Get VM 1 value to show that it changed.
vm1_info = tintri.get_vm(vm1.get_uuid())
print_qos_info(vm1, vm1_info)
# Get VM 2 value to show that it changed.
vm2_info = tintri.get_vm(vm2.get_uuid())
print_qos_info(vm2, vm2_info)
print_info("Changing min and max QOS values to Original values")
# Show updating one VM at a time using positional parameters.
# Update the first VM.
tintri.update_vms_qos_config([vm1.get_uuid()], vm1.get_min_qos(), vm1.get_max_qos())
# Update the second VM.
tintri.update_vms_qos_config([vm2.get_uuid()], vm2.get_min_qos(), vm2.get_max_qos())
    # Get VM 1 value to show that it is back to the original values.
vm1_info = tintri.get_vm(vm1.get_uuid())
print_qos_info(vm1, vm1_info)
    # Get VM 2 value to show that it is back to the original values.
vm2_info = tintri.get_vm(vm2.get_uuid())
print_qos_info(vm2, vm2_info)
except TintriServerError as tse:
print_error(tse.__str__())
tintri.logout()
sys.exit(2)
tintri.logout()
|
|
"""User statistics M/C.
.. moduleauthor:: Dave Zimmelman <zimmed@zimmed.io>
Exports:
"""
import math
from core.datamodel import DataModelController, Collection
from core.decorators import classproperty
class UserStatistics(DataModelController):
@classproperty
def MODEL_RULES(cls):
rules = super(UserStatistics, cls).MODEL_RULES
rules.update({
'games_won': ('games_won', int, None),
'games_lost': ('games_lost', int, None),
'history': ('history', Collection.List(int), None),
'twofers': ('twofers', int, None),
'won_bet_rounds': ('won_bet_rounds', int, None),
'lost_bet_rounds': ('lost_bet_rounds', int, None),
'won_counter_rounds': ('won_counter_rounds', int, None),
'lost_counter_rounds': ('lost_counter_rounds', int, None),
'avg_win_bet': ('avg_win_bet', int, None),
'avg_counter_win': ('avg_counter_win', int, None),
'team_mates': ('team_mates', Collection.Dict(list), None),
'elo': ('elo', float, None),
'rank': ('rank', int, None),
'ranked_wins': ('ranked_wins', int, None),
            'ranked_losses': ('ranked_losses', int, None)
})
return rules
@classproperty
def INIT_DEFAULTS(cls):
defaults = super(UserStatistics, cls).INIT_DEFAULTS
defaults.update({
'games_won': 0,
'games_lost': 0,
'history': [],
'twofers': 0,
'won_bet_rounds': 0,
'lost_bet_rounds': 0,
'won_counter_rounds': 0,
'lost_counter_rounds': 0,
'avg_win_bet': 0,
'avg_counter_win': 0,
'elo': 600.0,
'rank': 1,
'ranked_wins': 0,
'ranked_losses': 0,
'team_mates': {}
})
return defaults
@classmethod
def restore(cls, data_model, data_store, **kwargs):
ctrl = data_store.get_controller(cls, data_model.uid)
if not ctrl:
kwargs.update({
'games_won': data_model.games_won,
'games_lost': data_model.games_lost,
'history': data_model.history,
'twofers': data_model.twofers,
'won_bet_rounds': data_model.won_bet_rounds,
'lost_bet_rounds': data_model.lost_bet_rounds,
'won_counter_rounds': data_model.won_counter_rounds,
'lost_counter_rounds': data_model.lost_counter_rounds,
'avg_win_bet': data_model.avg_win_bet,
'avg_counter_win': data_model.avg_counter_win,
'elo': data_model.elo,
'rank': data_model.rank,
                'ranked_wins': data_model.ranked_wins,
                'ranked_losses': data_model.ranked_losses,
                'team_mates': data_model.team_mates
})
ctrl = super(UserStatistics, cls).restore(
data_model, data_store, **kwargs)
return ctrl
def won_bet_round(self, bet):
self.avg_win_bet = _avg(self.won_bet_rounds,
self.avg_win_bet, bet)
self.won_bet_rounds += 1
        if bet == 100:
self.twofers += 1
def lost_bet_round(self):
self.lost_bet_rounds += 1
def won_counter_round(self, points):
self.avg_counter_win = _avg(self.won_counter_rounds,
self.avg_counter_win, points)
self.won_counter_rounds += 1
def lost_counter_round(self):
self.lost_counter_rounds += 1
def add_game_to_history(self, game_id):
self.history.insert(0, game_id)
self._update_model_collection('history', {'action': 'insert',
'index': 0})
def update_comp_game_stats(self, game_id, team_elo, opposing_team_elo,
win):
self.add_game_to_history(game_id)
if win:
self.games_won += 1
self.ranked_wins += 1
else:
self.ranked_losses += 1
self.games_lost += 1
games_played = self.ranked_wins + self.ranked_losses
elo_change = _elo_calc(team_elo, games_played, opposing_team_elo, win,
self.ranked_wins)
        self.elo = max(200, self.elo + elo_change)  # floor the rating at 200
self.rank = _elo_rank(self.elo, self.ranked_wins)
def update_casual_game_stats(self, game_id, team_mate, opposing_team_elo,
win):
self.add_game_to_history(game_id)
if win:
self.games_won += 1
else:
self.games_lost += 1
self.rank = _elo_rank(self.elo, self.ranked_wins)
if team_mate not in self.team_mates:
            self.team_mates[team_mate] = [0, 0]  # list, so the entry stays mutable
self.team_mates[team_mate][1] = _performance_rating(
self.team_mates[team_mate][0],
self.team_mates[team_mate][1],
opposing_team_elo, win)
self.team_mates[team_mate][0] += 1
def update_free_game_stats(self, game_id):
self.add_game_to_history(game_id)
def _avg(count, avg, new_num):
"""Incremental average.
:param count: int -- The previous total.
:param avg: float -- The previous average.
:param new_num: int|float -- The new number.
:return: float -- The new average.
"""
if not count:
return float(new_num)
return (count * avg + new_num) / (count + 1.0)
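# Quick sanity check of the incremental average (illustrative values only):
# starting from 2 games averaging 4.0, folding in a 10 gives (2*4.0 + 10) / 3 = 6.0,
# i.e. _avg(2, 4.0, 10) == 6.0, while _avg(0, 0.0, 7) == 7.0 for the first sample.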
def _performance_rating(count, rating, opposing_elo, win=True):
"""Elo-based performance rating.
Algorithm adapted from: https://en.wikipedia.org/wiki/Elo_rating_system#Performance_rating
:param count: int -- Number of games won in team.
:param rating: float -- Current team rating.
:param opposing_elo: float -- Average elo of opposing team.
:param win: bool -- Whether to update with a win or a loss.
:return: float -- New team rating
"""
add = 400 if win else -400
if not count:
return opposing_elo + add
return (rating * count + opposing_elo + add) / (count + 1.0)
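# Illustrative values: with no prior games, a win against a 1800.0-rated team gives
# _performance_rating(0, 0.0, 1800.0) == 2200.0; a follow-up loss against a
# 1600.0-rated team averages in 1600 - 400 = 1200, giving (2200 + 1200) / 2 = 1700.0.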
def _effective_games(num_games, player_elo):
"""Determine effective number of games for elo calculation.
:param num_games: int
:param player_elo: float
:return: int -- Effective number of games.
"""
if player_elo > 2355:
return num_games
else:
        fifty = 50 / math.sqrt(0.662 + 0.00000739 * math.pow(2569 - player_elo, 2))
        # Cap the count at the rating-based value so provisional (low-rated)
        # players keep a larger K-factor.
        return min(num_games, int(0.5 + fifty))
def _prediction(player_elo, opponent_elo):
"""Standard elo prediction probability.
Based on the USCF rating algorithm. Predicts the probability of the player
winning over the opponent.
:param player_elo: float
:param opponent_elo: float
:return: float -- Probability of win.
"""
exponent = -1 * (player_elo - opponent_elo) / 400.0
return 1.0 / (1.0 + math.pow(10, exponent))
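# Illustrative values: equal ratings give _prediction(1200.0, 1200.0) == 0.5, and a
# 400-point edge gives _prediction(1600.0, 1200.0) == 1 / (1 + 10**-1), roughly 0.909.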
def _elo_calc(player_elo, num_games, opponent_elo, win, num_games_won):
"""Standard elo calculation.
Based on the USCF rating algorithm.
:param player_elo: float
:param num_games: int -- Total number of games played.
:param opponent_elo: float
:param win: bool
:param num_games_won: int -- Total number of games won.
:return: float -- Elo change.
"""
outcome = 1 if win else 0
effective_games = _effective_games(num_games, player_elo)
if num_games_won <= 10:
effective_games += num_games_won
prediction = _unranked_prediction(player_elo, opponent_elo)
else:
prediction = _prediction(player_elo, opponent_elo)
k_factor = 800.0 / effective_games
return k_factor * (outcome - prediction)
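# Rough illustration: for an established player (num_games_won > 10) rated level with
# the opponent, prediction == 0.5, so a win moves elo by +0.5 * (800.0 / effective_games)
# and a loss by the same amount in the other direction.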
def _unranked_prediction(player_elo, opponent_elo):
"""Special elo prediction probability for unranked player.
:param player_elo: float
:param opponent_elo: float
:return: float -- Probability of win.
"""
if player_elo >= opponent_elo + 400:
prob = 1.0
elif player_elo <= opponent_elo - 400:
prob = 0.0
else:
prob = 0.5 + (player_elo - opponent_elo) / 800
return prob
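# Example: _unranked_prediction(1300.0, 1200.0) == 0.5 + 100.0 / 800 == 0.625; the
# probability is clamped to 1.0 or 0.0 once the rating gap reaches 400 points.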
def _elo_rank(player_elo, num_games_won):
if num_games_won < 10:
return 1
else:
        return min(14, 1 + int(player_elo / 200))  # cap the rank at 14
|
|
from pyglet.gl import *
from pyglet import font
from plot_object import PlotObject
from util import strided_range, billboard_matrix
from util import get_direction_vectors
from util import dot_product, vec_sub, vec_mag
from sympy.core import S
class PlotAxes(PlotObject):
def __init__(self, *args, **kwargs):
# initialize style parameter
style = kwargs.pop('style', '').lower()
# allow alias kwargs to override style kwarg
if kwargs.pop('none', None) is not None: style = 'none'
if kwargs.pop('frame', None) is not None: style = 'frame'
if kwargs.pop('box', None) is not None: style = 'box'
if kwargs.pop('ordinate', None) is not None: style = 'ordinate'
if style in ['', 'ordinate']:
self._render_object = PlotAxesOrdinate(self)
elif style in ['frame', 'box']:
self._render_object = PlotAxesFrame(self)
elif style in ['none']:
self._render_object = None
else: raise ValueError(("Unrecognized axes "
"style %s.") % (style))
# initialize stride parameter
stride = kwargs.pop('stride', 0.25)
try: stride = eval(stride)
except: pass
if isinstance(stride, (list, tuple)):
assert len(stride) == 3
self._stride = stride
else:
self._stride = [stride, stride, stride]
self._tick_length = float(kwargs.pop('tick_length', 0.1))
# setup bounding box and ticks
self._origin = [0,0,0]
self.reset_bounding_box()
def flexible_boolean(input, default):
if input in [True, False]:
return input
if input in ['f','F','false','False']: return False
if input in ['t','T','true','True']: return True
return default
# initialize remaining parameters
self.visible = flexible_boolean(kwargs.pop('visible',''), True)
self._overlay = flexible_boolean(kwargs.pop('overlay',''), True)
self._colored = flexible_boolean(kwargs.pop('colored',''), False)
self._label_axes = flexible_boolean(kwargs.pop('label_axes', ''), False)
self._label_ticks = flexible_boolean(kwargs.pop('label_ticks', ''), True)
# setup label font
self.font_face = kwargs.pop('font_face', 'Arial')
self.font_size = kwargs.pop('font_size', 28)
# this is also used to reinit the
# font on window close/reopen
self.reset_resources()
def reset_resources(self):
self.label_font = None
def reset_bounding_box(self):
self._bounding_box = [[None,None], [None,None], [None,None]]
self._axis_ticks = [[],[],[]]
def draw(self):
if self._render_object:
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT | GL_DEPTH_BUFFER_BIT)
if self._overlay: glDisable(GL_DEPTH_TEST)
self._render_object.draw()
glPopAttrib()
def adjust_bounds(self, child_bounds):
b = self._bounding_box
c = child_bounds
for i in [0,1,2]:
if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity: continue
b[i][0] = [ min([b[i][0], c[i][0]]), c[i][0] ][ b[i][0] is None ]
b[i][1] = [ max([b[i][1], c[i][1]]), c[i][1] ][ b[i][1] is None ]
self._recalculate_axis_ticks(i)
def _recalculate_axis_ticks(self, axis):
b = self._bounding_box
if b[axis][0] is None or b[axis][1] is None:
self._axis_ticks[axis] = []
else:
self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1], self._stride[axis])
def toggle_visible(self):
self.visible = not self.visible
def toggle_colors(self):
self._colored = not self._colored
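# A minimal usage sketch (hypothetical values; an axes object is normally created by
# the plotting front end rather than by hand, and draw() needs a live GL context):
#     axes = PlotAxes(style='box', stride=0.5, colored=True, label_axes=True)
#     axes.adjust_bounds([[-1, 1], [-1, 1], [0, 2]])
#     axes.draw()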
class PlotAxesBase(PlotObject):
def __init__(self, parent_axes):
self._p = parent_axes
def draw(self):
color = [ ([0.2,0.1,0.3], [0.2,0.1,0.3], [0.2,0.1,0.3]),
([0.9,0.3,0.5], [0.5,1.0,0.5], [0.3,0.3,0.9]) ][ self._p._colored ]
self.draw_background(color)
self.draw_axis(2, color[2])
self.draw_axis(1, color[1])
self.draw_axis(0, color[0])
def draw_background(self, color):
pass # optional
def draw_axis(self, axis, color):
raise NotImplementedError()
def draw_text(self, text, position, color, scale=1.0):
if len(color) == 3: color = (color[0], color[1], color[2], 1.0)
if self._p.label_font is None:
self._p.label_font = font.load(self._p.font_face,
self._p.font_size,
bold=True, italic=False)
label = font.Text(self._p.label_font, text,
color=color,
valign=font.Text.BASELINE,
halign=font.Text.CENTER)
glPushMatrix()
glTranslatef(*position)
billboard_matrix()
scale_factor = 0.005*scale
glScalef(scale_factor, scale_factor, scale_factor)
glColor4f(0,0,0,0)
label.draw()
glPopMatrix()
def draw_line(self, v, color):
o = self._p._origin
glBegin(GL_LINES)
glColor3f(*color)
glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2])
glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2])
glEnd()
class PlotAxesOrdinate(PlotAxesBase):
def __init__(self, parent_axes):
super(PlotAxesOrdinate, self).__init__(parent_axes)
def draw_axis(self, axis, color):
ticks = self._p._axis_ticks[axis]
radius = self._p._tick_length / 2.0
if len(ticks) < 2: return
# calculate the vector for this axis
axis_lines = [[0,0,0], [0,0,0]]
axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
axis_vector = vec_sub( axis_lines[1], axis_lines[0] )
# calculate angle to the z direction vector
pos_z = get_direction_vectors()[2]
d = abs( dot_product(axis_vector, pos_z) )
d = d / vec_mag(axis_vector)
# don't draw labels if we're looking down the axis
labels_visible = abs(d - 1.0) > 0.02
# draw the ticks and labels
for tick in ticks:
self.draw_tick_line(axis, color, radius, tick, labels_visible)
# draw the axis line and labels
self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)
def draw_axis_line(self, axis, color, a_min, a_max, labels_visible):
axis_line = [[0,0,0], [0,0,0]]
axis_line[0][axis], axis_line[1][axis] = a_min, a_max
self.draw_line(axis_line, color)
if labels_visible: self.draw_axis_line_labels(axis, color, axis_line)
def draw_axis_line_labels(self, axis, color, axis_line):
if not self._p._label_axes: return
axis_labels = [axis_line[0][::], axis_line[1][::]]
axis_labels[0][axis] -= 0.3
axis_labels[1][axis] += 0.3
a_str = ['X', 'Y', 'Z'][axis]
self.draw_text("-" + a_str, axis_labels[0], color)
self.draw_text("+" + a_str, axis_labels[1], color)
def draw_tick_line(self, axis, color, radius, tick, labels_visible):
tick_axis = {0: 1, 1: 0, 2: 1}[axis]
tick_line = [[0,0,0], [0,0,0]]
tick_line[0][axis] = tick_line[1][axis] = tick
tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius
self.draw_line(tick_line, color)
if labels_visible: self.draw_tick_line_label(axis, color, radius, tick)
def draw_tick_line_label(self, axis, color, radius, tick):
        if not self._p._label_ticks: return
tick_label_vector = [0,0,0]
tick_label_vector[axis] = tick
tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1,1,1][axis]*radius*3.5
self.draw_text(str(tick), tick_label_vector, color, scale=0.5)
class PlotAxesFrame(PlotAxesBase):
def __init__(self, parent_axes):
super(PlotAxesFrame, self).__init__(parent_axes)
def draw_background(self, color):
pass
def draw_axis(self, axis, color):
raise NotImplementedError()
|
|
# vim:ts=4:sts=4:sw=4:expandtab
from datetime import datetime
from django.db import models
import inspect
from satori.ars.model import *
from satori.ars import perf
from satori.core.export.type_helpers import DefineException, ArsDeferredStructure, python_to_ars_type
from satori.core.export.pc import AccessDenied
ArgumentNotFound = DefineException('ArgumentNotFound', 'The specified argument cannot be found: model={model}, id={id}',
[('model', unicode, False), ('id', long, False)])
CannotReturnObject = DefineException('CannotReturnObject', 'You don\'t have rights to view the returned object')
CannotDeleteObject = DefineException('CannotDeleteObject', 'You can\'t delete this object')
field_basic_types = {
models.AutoField: long,
models.IntegerField: int,
models.CharField: unicode,
models.TextField: unicode,
models.BooleanField: bool,
models.DateTimeField: datetime,
models.IPAddressField: unicode,
models.FloatField: float,
}
field_type_map = {}
def django_field_to_python_type(field):
if not field in field_type_map:
field_type = None
if type(field) in field_basic_types:
field_type = field_basic_types[type(field)]
if isinstance(field, models.ForeignKey):
if issubclass(field.rel.to, models.Model):
field_type = DjangoId(field.rel.to.__name__)
else:
field_type = DjangoId(field.rel.to.split('.')[-1])
field_type_map[field] = field_type
return field_type_map[field]
ars_django_id = {}
ars_django_structure = {}
ars_django_id_list = {}
ars_django_structure_list = {}
class ArsDjangoId(ArsTypeAlias):
def __init__(self, model):
super(ArsDjangoId, self).__init__(name=(model.__name__ + 'Id'), target_type=ArsInt64)
self.model = model
def do_needs_conversion(self):
return True
def do_convert_to_ars(self, value):
# # decided not to enforce
# if not Privilege.demand(value, 'VIEW'):
# raise CannotReturnObject()
return value.id
def do_convert_from_ars(self, value):
try:
ret = Privilege.wrap(self.model, select=['VIEW']).get(id=value)
except self.model.DoesNotExist:
raise ArgumentNotFound(model=self.model.__name__, id=value)
else:
if not Privilege.demand(ret, 'VIEW'):
raise AccessDenied()
return ret
class DjangoId(object):
def __init__(self, model):
super(DjangoId, self).__init__()
self.model = model
def ars_type(self):
if not self.model in ars_django_id:
raise RuntimeError('Model not exported: {0}'.format(self.model))
return ars_django_id[self.model]
class ArsDjangoStructure(ArsDeferredStructure):
def __init__(self, model, fields, extra_fields):
super(ArsDjangoStructure, self).__init__(model.__name__ + 'Struct', [])
self.model = model
self.django_fields = fields
self.django_extra_fields = extra_fields
def init_fields(self):
field_dict = dict((field.name, field) for field in self.model._meta.fields)
for (field_name, field_permission) in self.django_fields:
self.add_field(name=field_name, type=python_to_ars_type(django_field_to_python_type(field_dict[field_name])), optional=True)
for (field_name, field_type, field_permission) in self.django_extra_fields:
self.add_field(name=field_name, type=python_to_ars_type(field_type), optional=True)
def do_needs_conversion(self):
return True
def do_convert_to_ars(self, value):
if not hasattr(value, '_can_VIEW'):
value = Privilege.wrap(value.__class__, struct=True).get(id=value.id)
if not Privilege.demand(value, 'VIEW'):
raise CannotReturnObject()
ret = self.get_class()()
for (field_name, field_permission) in self.django_fields:
if Privilege.demand(value, field_permission):
field_type = self.fields[field_name].type
if field_type.needs_conversion():
if isinstance(field_type, ArsDjangoId):
setattr(ret, field_name, getattr(value, field_name + '_id'))
else:
setattr(ret, field_name, field_type.convert_to_ars(getattr(value, field_name)))
else:
setattr(ret, field_name, getattr(value, field_name))
for (field_name, field_type, field_permission) in self.django_extra_fields:
if Privilege.demand(value, field_permission):
if field_type.needs_conversion():
setattr(ret, field_name, field_type.convert_to_ars(getattr(value, field_name)))
else:
setattr(ret, field_name, getattr(value, field_name))
return ret
def do_convert_from_ars(self, value):
return super(ArsDjangoStructure, self).do_convert_from_ars(value)
class DjangoStruct(object):
def __init__(self, model):
super(DjangoStruct, self).__init__()
self.model = model
def __call__(self, *args, **kwargs):
return self.ars_type().get_class()(*args, **kwargs)
def ars_type(self):
if not self.model in ars_django_structure:
raise RuntimeError('Model not exported: {0}'.format(self.model))
return ars_django_structure[self.model]
class ArsDjangoIdList(ArsList):
def __init__(self, model):
super(ArsDjangoIdList, self).__init__(element_type=ars_django_id[model])
self.model = model
def do_needs_conversion(self):
return True
def do_convert_to_ars(self, value):
return super(ArsDjangoIdList, self).do_convert_to_ars(Privilege.wrap(value, where=['VIEW'], select=['VIEW']))
def do_convert_from_ars(self, value):
return super(ArsDjangoIdList, self).do_convert_from_ars(value)
class DjangoIdList(object):
def __init__(self, model):
super(DjangoIdList, self).__init__()
self.model = model
def ars_type(self):
if not self.model in ars_django_id_list:
raise RuntimeError('Model not exported: {0}'.format(self.model))
return ars_django_id_list[self.model]
class ArsDjangoStructureList(ArsList):
def __init__(self, model):
super(ArsDjangoStructureList, self).__init__(element_type=ars_django_structure[model])
self.model = model
def do_needs_conversion(self):
return True
def do_convert_to_ars(self, value):
return super(ArsDjangoStructureList, self).do_convert_to_ars(Privilege.wrap(value, where=['VIEW'], struct=True))
def do_convert_from_ars(self, value):
return super(ArsDjangoStructureList, self).do_convert_from_ars(value)
class DjangoStructList(object):
def __init__(self, model):
super(DjangoStructList, self).__init__()
self.model = model
def ars_type(self):
if not self.model in ars_django_structure_list:
raise RuntimeError('Model not exported: {0}'.format(self.model))
return ars_django_structure_list[self.model]
def generate_django_types(cls):
ars_django_id[cls] = ArsDjangoId(cls)
ars_django_id[cls.__name__] = ars_django_id[cls]
fields = []
extra_fields = []
for parent_cls in reversed(inspect.getmro(cls)):
if 'ExportMeta' in parent_cls.__dict__:
if hasattr(parent_cls.ExportMeta, 'fields'):
fields.extend(parent_cls.ExportMeta.fields)
if hasattr(parent_cls.ExportMeta, 'extra_fields'):
extra_fields.extend(parent_cls.ExportMeta.extra_fields)
ars_django_structure[cls] = ArsDjangoStructure(cls, fields, extra_fields)
ars_django_structure[cls.__name__] = ars_django_structure[cls]
ars_django_id_list[cls] = ArsDjangoIdList(cls)
ars_django_id_list[cls.__name__] = ars_django_id_list[cls]
ars_django_structure_list[cls] = ArsDjangoStructureList(cls)
ars_django_structure_list[cls.__name__] = ars_django_structure_list[cls]
cls._struct_rights = set([field_permission for (field_name, field_permission) in fields]
+ [field_permission for (field_name, field_type, field_permission) in extra_fields]
+ ['VIEW'])
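# A hypothetical sketch of how a model opts in (names are illustrative only): exported
# fields are collected from 'ExportMeta' declarations along the MRO, so a model such as
#
#     class Entry(models.Model):
#         name = models.CharField(max_length=64)
#         class ExportMeta:
#             fields = [('name', 'VIEW')]
#
# registered via generate_django_types(Entry) gets EntryId, EntryStruct and the matching
# list types placed in the ars_django_* registries.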
def init():
global Privilege
from satori.core.models import Privilege
|
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Drivers for streaming reductions framework."""
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import random
from tensorflow_probability.python.experimental.mcmc import sample
from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel
from tensorflow_probability.python.experimental.mcmc import step
from tensorflow_probability.python.experimental.mcmc import thinning_kernel
from tensorflow_probability.python.experimental.mcmc import with_reductions
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'sample_chain_with_burnin',
'sample_fold',
]
def sample_fold(
num_steps,
current_state,
previous_kernel_results=None,
kernel=None,
reducer=None,
previous_reducer_state=None,
return_final_reducer_states=False,
num_burnin_steps=0,
num_steps_between_results=0,
parallel_iterations=10,
seed=None,
name=None,
):
"""Computes the requested reductions over the `kernel`'s samples.
To wit, runs the given `kernel` for `num_steps` steps, and consumes
the stream of samples with the given `Reducer`s' `one_step` method(s).
This runs in constant memory (unless a given `Reducer` builds a
large structure).
The driver internally composes the correct onion of `WithReductions`
and `SampleDiscardingKernel` to implement the requested optionally
thinned reduction; however, the kernel results of those applied
Transition Kernels will not be returned. Hence, if warm-restarting
reductions is desired, one should manually build the Transition Kernel
onion and use `tfp.experimental.mcmc.step_kernel`.
An arbitrary collection of `reducer` can be provided, and the resulting
finalized statistic(s) will be returned in an identical structure.
This function can sample from and reduce over multiple chains, in parallel.
Whether or not there are multiple chains is dictated by how the `kernel`
treats its inputs. Typically, the shape of the independent chains is shape of
the result of the `target_log_prob_fn` used by the `kernel` when applied to
the given `current_state`.
Args:
num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
steps.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
Warm-start for the auxiliary state needed by the given `kernel`.
If not supplied, `sample_fold` will cold-start with
`kernel.bootstrap_results`.
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
reducer: A (possibly nested) structure of `Reducer`s to be evaluated
on the `kernel`'s samples. If no reducers are given (`reducer=None`),
then `None` will be returned in place of streaming calculations.
previous_reducer_state: A (possibly nested) structure of running states
corresponding to the structure in `reducer`. For resuming streaming
reduction computations begun in a previous run.
return_final_reducer_states: A Python `bool` giving whether to return
resumable final reducer states.
num_burnin_steps: Integer or scalar `Tensor` representing the number
of chain steps to take before starting to collect results.
Defaults to 0 (i.e., no burn-in).
num_steps_between_results: Integer or scalar `Tensor` representing
the number of chain steps between collecting a result. Only one out
      of every `num_steps_between_results + 1` steps is included in the
returned results. Defaults to 0 (i.e., no thinning).
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'mcmc_sample_fold').
Returns:
reduction_results: A (possibly nested) structure of finalized reducer
statistics. The structure identically mimics that of `reducer`.
end_state: The final state of the Markov chain(s).
final_kernel_results: `collections.namedtuple` of internal calculations
used to advance the supplied `kernel`. These results do not include
the kernel results of `WithReductions` or `SampleDiscardingKernel`.
final_reducer_states: A (possibly nested) structure of final running reducer
states, if `return_final_reducer_states` was `True`. Can be used to
resume streaming reductions when continuing sampling.
"""
with tf.name_scope(name or 'mcmc_sample_fold'):
num_steps = tf.convert_to_tensor(
num_steps, dtype=tf.int32, name='num_steps')
current_state = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(x, name='current_state'),
current_state)
reducer_was_none = False
if reducer is None:
reducer = []
reducer_was_none = True
thinning_k = sample_discarding_kernel.SampleDiscardingKernel(
inner_kernel=kernel,
num_burnin_steps=num_burnin_steps,
num_steps_between_results=num_steps_between_results)
reduction_kernel = with_reductions.WithReductions(
inner_kernel=thinning_k,
reducer=reducer,
# Strip thinning kernel results layer
adjust_kr_fn=lambda kr: kr.inner_results,
)
if previous_kernel_results is None:
previous_kernel_results = kernel.bootstrap_results(current_state)
thinning_pkr = thinning_k.bootstrap_results(
current_state, previous_kernel_results)
reduction_pkr = reduction_kernel.bootstrap_results(
current_state, thinning_pkr, previous_reducer_state)
end_state, final_kernel_results = step.step_kernel(
num_steps=num_steps,
current_state=current_state,
previous_kernel_results=reduction_pkr,
kernel=reduction_kernel,
return_final_kernel_results=True,
parallel_iterations=parallel_iterations,
seed=seed,
name=name,
)
reduction_results = nest.map_structure_up_to(
reducer,
lambda r, s: r.finalize(s),
reducer,
final_kernel_results.reduction_results,
check_types=False)
if reducer_was_none:
reduction_results = None
# TODO(axch): Choose a friendly return value convention that
# - Doesn't burden the user with needless stuff when they don't want it
# - Supports warm restart when the user does want it
# - Doesn't trigger Pylint's unbalanced-tuple-unpacking warning.
if return_final_reducer_states:
return (reduction_results,
end_state,
final_kernel_results.inner_results.inner_results,
final_kernel_results.reduction_results)
else:
return (reduction_results,
end_state,
final_kernel_results.inner_results.inner_results)
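# A minimal usage sketch of `sample_fold` (illustrative only; it assumes
# `tfp.mcmc.RandomWalkMetropolis` and `tfp.experimental.mcmc.ExpectationsReducer`
# are available and may need adjusting to the installed TFP version).
def _example_sample_fold_usage():
  import tensorflow_probability as tfp  # local import; illustration only
  kernel = tfp.mcmc.RandomWalkMetropolis(
      target_log_prob_fn=tfp.distributions.Normal(0., 1.).log_prob)
  reducer = tfp.experimental.mcmc.ExpectationsReducer()
  # Burn in for 100 steps, then stream 500 thinned draws through the reducer.
  posterior_mean, end_state, _ = sample_fold(
      num_steps=500,
      current_state=tf.zeros([]),
      kernel=kernel,
      reducer=reducer,
      num_burnin_steps=100,
      num_steps_between_results=1,
      seed=(0, 42))
  return posterior_mean, end_state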
def _trace_current_state(current_state, kernel_results):
del kernel_results
return current_state
def sample_chain_with_burnin(
num_results,
current_state,
previous_kernel_results=None,
kernel=None,
num_burnin_steps=0,
num_steps_between_results=0,
trace_fn=_trace_current_state,
parallel_iterations=10,
seed=None,
name=None,
):
"""Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
This function samples from a Markov chain at `current_state` whose
stationary distribution is governed by the supplied `TransitionKernel`
instance (`kernel`).
This function can sample from multiple chains, in parallel. (Whether or not
there are multiple chains is dictated by the `kernel`.)
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state.
Since MCMC states are correlated, it is sometimes desirable to produce
additional intermediate states, and then discard them, ending up with a set of
states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning'
is made possible by setting `num_steps_between_results > 0`. The chain then
takes `num_steps_between_results` extra steps between the steps that make it
into the results. The extra steps are never materialized, and thus do not
increase memory requirements.
In addition to returning the chain state, this function supports tracing of
auxiliary variables used by the kernel. The traced values are selected by
specifying `trace_fn`. By default, all chain states but no kernel results are
traced.
Args:
num_results: Integer number of Markov chain draws.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s).
previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
representing internal calculations made within the previous call to this
function (or as returned by `bootstrap_results`).
kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
of the Markov chain.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
      a result. Only one out of every `num_steps_between_results + 1` steps is
included in the returned results. The number of returned chain states is
still equal to `num_results`. Default value: 0 (i.e., no thinning).
trace_fn: A callable that takes in the current chain state and the previous
kernel results and return a `Tensor` or a nested collection of `Tensor`s
that is then traced along with the chain state.
parallel_iterations: The number of iterations allowed to run in parallel. It
must be a positive integer. See `tf.while_loop` for more details.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e.,
'experimental_mcmc_sample_chain_with_burnin').
Returns:
result: A `RunKernelResults` instance containing information about the
sampling run. Main field is `trace`, the history of outputs of
`trace_fn`. See `RunKernelResults` for contents of other fields.
#### References
[1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
_Technical Report_, 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
"""
with tf.name_scope(name or 'experimental_mcmc_sample_chain_with_burnin'):
if not kernel.is_calibrated:
warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '
'chain may not converge to intended target distribution.')
if trace_fn is None:
trace_fn = lambda *args: ()
burnin_seed, sampling_seed = random.split_seed(seed, n=2)
# Burn-in run
chain_state, kr = step.step_kernel(
num_steps=num_burnin_steps,
current_state=current_state,
previous_kernel_results=previous_kernel_results,
kernel=kernel,
return_final_kernel_results=True,
parallel_iterations=parallel_iterations,
seed=burnin_seed,
name='burnin')
thinning_k = thinning_kernel.ThinningKernel(
kernel, num_steps_to_skip=num_steps_between_results)
# ThinningKernel doesn't wrap the kernel_results structure, so we don't need
# any of the usual munging.
results = sample.sample_chain(
num_results=num_results,
current_state=chain_state,
previous_kernel_results=kr,
kernel=thinning_k,
trace_fn=trace_fn,
parallel_iterations=parallel_iterations,
seed=sampling_seed,
name='sampling')
del results.resume_kwargs['reducer']
del results.resume_kwargs['previous_reducer_state']
return results
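# A minimal usage sketch (illustrative only; the kernel choice is hypothetical):
#     kernel = tfp.mcmc.HamiltonianMonteCarlo(
#         target_log_prob_fn=target_log_prob, step_size=0.1, num_leapfrog_steps=3)
#     results = sample_chain_with_burnin(
#         num_results=1000, current_state=tf.zeros([]), kernel=kernel,
#         num_burnin_steps=500, seed=(1, 2))
#     chain = results.trace  # history of chain states (the default trace_fn)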
|
|
"""
inference
=========
EP inference drivers (in the future, other inference methods could be
implemented here as well).
"""
import numpy as np
import scipy.linalg as sla
import scipy.sparse as ssp
import scipy.io # DEBUG: Load Matlab files
import numbers
import time # For profiling
import apbsint.helpers as helpers
import apbsint.coup_fact as cf
import apbsint.utilities as ut
import apbsint.eptools_ext as epx
__all__ = ['InfDriver', 'CoupledInfDriver', 'EPCoupParallelInfDriver',
'EPCoupSequentialInfDriver', 'EPFactorizedInfDriver']
# Inference driver classes
class InfDriver:
"""
InfDriver
=========
Base class for approximate inference drivers.
A driver is configured by a Model and a Representation. The former
contains model descriptions (potential manager, coupling factor), the
latter contains the representation of the posterior approximation.
The 'inference' method operates on the representation, fitting the
approximate to the true posterior. The 'predict' method computes
predictive moments on a test Model.
"""
def __init__(self,model,rep):
if not isinstance(model,ut.Model):
raise TypeError('MODEL must be instance of apbsint.Model')
if not isinstance(rep,ut.Representation):
raise TypeError('REP must be instance of apbsint.Representation')
if not model.bfact is rep.bfact:
raise ValueError('MODEL.BFACT, REP.BFACT must be same object')
self.model = model
self.rep = rep
def inference(self,opts):
raise NotImplementedError('INFERENCE must be implemented')
def predict(self,pmodel,opts):
raise NotImplementedError('PREDICT must be implemented')
def _predict_epcomp(self,pmodel,h_q,rho_q):
"""
Helper for 'predict'. Given Gaussian moments in 'h_q', 'rho_q', runs
local EP computations and returns 'logz', 'h_p', 'rho_p'. See
docstring of 'CoupledInfDriver.predict'.
"""
pbfact = pmodel.bfact
(pm, n) = pbfact.shape()
ppotman = pmodel.potman
ppotman.check_internal()
rstat = np.empty(pm,dtype=np.int32)
alpha = np.empty(pm)
nu = np.empty(pm)
logz = np.empty(pm)
epx.epupdate_parallel(ppotman.potids,ppotman.numpot,ppotman.parvec,
ppotman.parshrd,ppotman.annobj,h_q,rho_q,rstat,
alpha,nu,logz)
indok = np.nonzero(rstat)[0]
tvec = 1. - nu[indok]*rho_q[indok]
indok2 = np.nonzero(tvec >= 1e-9)[0]
if indok2.shape[0] < pm:
indok = indok[indok2].copy()
# ATTENTION: This is **very** slow:
indnok = [x for x in range(pm) if x not in set(indok)]
logz[indnok] = 0.
h_p = h_q.copy()
rho_p = rho_q.copy()
h_p[indok] += alpha[indok]*rho_q[indok]
rho_p[indok] *= tvec[indok2]
else:
h_p = h_q + alpha*rho_q
rho_p = rho_q*tvec
return (logz, h_p, rho_p)
def _infer_check_commonargs(self,opts):
"""
Checks common arguments of 'inference' implementations in
subclasses and assigns default values.
'opts.imode' must be 'CoupParallel', 'CoupSequential' or
'Factorized'.
"""
if not (opts.imode == 'CoupParallel' or
opts.imode == 'CoupSequential' or
opts.imode == 'Factorized'):
raise ValueError('OPTS.IMODE has wrong value')
if not (isinstance(opts.maxit,numbers.Integral) and opts.maxit>=1):
raise TypeError('OPTS.MAXIT wrong')
if not (isinstance(opts.deltaeps,numbers.Real) and opts.deltaeps>0.):
raise TypeError('OPTS.DELTAEPS wrong')
try:
if not (isinstance(opts.damp,numbers.Real) and opts.damp>=0. and
opts.damp<1.):
raise TypeError('OPTS.DAMP wrong')
except AttributeError:
opts.damp = 0.
try:
if not isinstance(opts.res_det,bool):
raise TypeError('OPTS.RES_DET wrong')
except AttributeError:
opts.res_det = False
try:
if not isinstance(opts.verbose,numbers.Integral):
raise TypeError('OPTS.VERBOSE wrong')
except AttributeError:
opts.verbose = 0
if opts.imode != 'Factorized':
try:
if not isinstance(opts.bc_testmodel,ut.ModelCoupled):
raise TypeError('OPTS.BC_TESTMODEL must be apbsint.ModelCoupled')
except AttributeError:
pass
try:
if not (isinstance(opts.caveps,numbers.Real) and opts.caveps>0.):
raise TypeError('OPTS.CAVEPS wrong')
except AttributeError:
opts.caveps = 1e-5
else:
try:
if not isinstance(opts.bc_testmodel,ut.ModelFactorized):
raise TypeError('OPTS.BC_TESTMODEL must be apbsint.ModelFactorized')
except AttributeError:
pass
if opts.imode != 'CoupParallel':
try:
if not isinstance(opts.refresh,bool):
raise TypeError('OPTS.REFRESH wrong')
except AttributeError:
opts.refresh = True
def _binclass_print_teststats(self,pmodel,targets,imode):
"""
Helper for 'inference'. Only for binary classification right now.
'targets' must be target vector (values -1, +1).
Calls 'predict', computes test set statistics (accuracy, avg. log
likelihood) and prints information.
"""
popts = helpers.Struct()
popts.imode = imode
popts.ptype = 3
(h_q, rho_q, logz, h_p, rho_p) = self.predict(pmodel,popts)
nte = targets.shape[0]
acc = 100.*float((np.sign(h_q)==targets).sum())/nte
loglh = logz.sum()/nte
print ('Test set predictions: Accuracy: %4.2f%%, '
'log likelihood: %.6f') % (acc, loglh)
def _binclass_assemble_targets(self,opts):
# Assemble test target vector
pman = opts.bc_testmodel.potman
targets = np.empty(pman.size)
off = 0
for el in pman.elem:
sz = el.size
targets[off:off+sz] = el.pars[0]
off += sz
return targets
class CoupledInfDriver(InfDriver):
"""
CoupledInfDriver
================
Base class of coupled mode inference drivers. 'predict' is implemented
here, as well as 'init'.
"""
def __init__(self,model,rep):
if not isinstance(model,ut.ModelCoupled):
raise TypeError('MODEL must be instance of apbsint.ModelCoupled')
if not isinstance(rep,ut.RepresentationCoupled):
raise TypeError('REP must be instance of apbsint.RepresentationCoupled')
InfDriver.__init__(self,model,rep)
def predict(self,pmodel,opts):
"""
Prediction on test model 'pmodel' (type apbsint.ModelCoupled). 'opts'
is a struct with attributes:
- imode: Inference mode ('CoupParallel', 'CoupSequential')
- ptype: What predictive moments are returned?
0: Gaussian means h_q
1: Gaussian moments (h_q, rho_q)
2: Predictive moments (logz, h_p, rho_p)
3: Everything (h_q, rho_q, logz, h_p, rho_p)
Here, the predictive marginal at a test point is
p(s) = Z^-1 t(s) q(s),
where t(s) is the potential, q(s) the Gaussian marginal. 'logz'
returns the log Z values
NOTE: We assume that the posterior covariance A^-1 is in 'rep.post_cov'
if 'opts.imode'=='CoupParallel'. In the other modes, A^-1 is
recomputed.
"""
model = self.model
rep = self.rep
if not isinstance(pmodel,ut.ModelCoupled):
raise TypeError('PMODEL must be instance of apbsint.ModelCoupled')
pbfact = pmodel.bfact
(pm, n) = pbfact.shape()
if n != model.bfact.shape(1):
raise TypeError('PMODEL, MODEL: Different number of variables')
use_cov = (opts.imode == 'CoupParallel')
if not (isinstance(opts.ptype,numbers.Integral) and opts.ptype>=0 and
opts.ptype<=3):
raise ValueError('opts.ptype wrong')
# Compute Gaussian moments
h_q = np.empty(pm)
rho_q = np.empty(pm) if opts.ptype>0 else None
rep.predict(pbfact,h_q,rho_q,use_cov)
if opts.ptype==0:
return h_q
elif opts.ptype==1:
return (h_q, rho_q)
if opts.ptype==2:
res = ()
else:
res = (h_q, rho_q)
return res + self._predict_epcomp(pmodel,h_q,rho_q)
def init(self,mode,refresh=True):
"""
Initialize EP parameters according to mode 'mode'. The representation
is refreshed afterwards iff 'refresh'==True. Modes:
- 'ADF': Parameters for all non-Gaussian potentials set to zero. For
Gaussian potentials, parameters are set to represent them (they do
not change afterwards).
"""
if mode.upper() == 'ADF':
bfact = self.model.bfact
potman = self.model.potman
rep = self.rep
m, n = bfact.shape()
potman.check_internal()
ep_pi = np.zeros(rep.size_pars())
ep_beta = np.zeros(rep.size_pars())
off = 0
for el in potman.elem:
numk = el.size
if el.name == 'Gaussian':
ep_pi[off:off+numk] = 1./el.pars[1]
ep_beta[off:off+numk] = el.pars[0]/el.pars[1]
off += numk
rep.setpi(ep_pi)
rep.setbeta(ep_beta)
if refresh:
rep.refresh()
else:
raise ValueError("Unknown mode '" + mode + "'")
class EPCoupParallelInfDriver(CoupledInfDriver):
"""
EPCoupParallelInfDriver
=======================
Implements parallel updating expectation propagation in 'inference'.
"""
def __init__(self,model,rep):
CoupledInfDriver.__init__(self,model,rep)
def inference(self,opts):
"""
Update representation by running parallel updating EP. One sweep
consists of (a) parallel EP updates on all non-Gaussian potentials
(model.potman.updind), then (b) a recomputation (refresh) of the
representation. The latter stores the posterior covariance in
rep.post_cov, which is recycled by 'predict'. 'opts' attributes:
- maxit: Maximum number of sweeps
- deltaeps: Threshold for convergence (statistic based on relative
change of Gaussian means and stddevs.)
- damp: Damping constant (def.: 0 -> no damping)
- caveps: Update on k is skipped if
cavvar_k / margvar_k > 1/caveps,
or if there is some numerical failure
- res_det: Return detailed results in 'res_det' (below)? Def.: False
- verbose: Verbosity level (0: no messages, 1: some messages). Def.: 0
- bc_testmodel: Optional. Only for binary classification right now.
Test set model (type apbsint.ModelCoupled). Test set accuracy and
avg. log likelihood are computed and printed after each sweep.
Returns 'res' or '(res, res_det)' (latter if 'opts.res_det'==True).
'res' attributes:
- rstat: Return status (0: Converged to 'deltaeps'; 1: Done
'maxit' sweeps)
- nit: Number of sweeps done
- delta: Value convergence statistic after last sweep
- nskip: Total number of skipped updates across all sweeps
'res_det' attributes (optional):
- delta: Value after each sweep
- nskip: Value after each sweep
"""
#t_start0=time.time()
if not self.rep.keep_margs:
raise ValueError('REP.KEEP_MARGS must be True')
opts.imode = 'CoupParallel'
self._infer_check_commonargs(opts)
# Initialization
res = helpers.Struct()
res.rstat = 1
res.nskip = 0
if opts.res_det:
res_det = helpers.Struct()
res_det.delta = []
res_det.nskip = []
bfact = self.model.bfact
potman = self.model.potman
rep = self.rep
m, n = bfact.shape()
potman.check_internal()
try:
targets = self._binclass_assemble_targets(opts)
do_teststats = True
except AttributeError:
do_teststats = False
#t_stop=time.time()
#print 'Time(inference::init): %.8fs' % (t_stop-t_start0)
# Loop over sweeps
mm = potman.updind.shape[0]
cmu = np.empty(mm)
crho = np.empty(mm)
alpha0 = np.empty(mm)
nu0 = np.empty(mm)
rstat = np.empty(mm,dtype=np.int32)
sz = rep.ep_pi.shape[0]
new_pi = np.empty(sz)
new_beta = np.empty(sz)
old_margs = np.empty(2*mm)
new_margs = np.empty(2*mm)
for res.nit in range(1,opts.maxit+1):
#t_start1=time.time()
# Local EP updates
# We update only on potentials in 'potman.updind' (excludes
# Gaussians)
indok = potman.updind
# Compute cavity marginals
#t_start=time.time()
cmu[:] = rep.marg_means[indok]
crho[:] = rep.marg_vars[indok]
tvec = 1. - rep.ep_pi[indok]*crho
indok2 = np.nonzero(tvec >= opts.caveps)[0]
if indok2.shape[0] == mm:
cmu -= crho*rep.ep_beta[indok]
cmu /= tvec
crho /= tvec
else:
indok = indok[indok2].copy()
cmu[indok2] -= crho[indok2]*rep.ep_beta[indok]
tvec = 1./tvec[indok2]
cmu[indok2] *= tvec
crho[indok2] *= tvec
#t_stop=time.time()
#print 'Time(inference:comp_cav): %.8fs' % (t_stop-t_start)
#t_start=time.time()
epx.epupdate_parallel(potman.potids,potman.numpot,potman.parvec,
potman.parshrd,potman.annobj,cmu,crho,rstat,
alpha0,nu0,None,potman.updind)
#t_stop=time.time()
#print 'Time(epupdate_parallel): %.8fs' % (t_stop-t_start)
# Update EP parameters, and figure out where skips happened
#t_start=time.time()
if indok2.shape[0] < mm:
# Complement of 'indok2'
tarr = np.ones(mm,dtype=np.bool)
tarr[indok2] = False
indnok = np.nonzero(tarr)[0]
rstat[indnok] = 0 # Filter out undef. cavity positions
#print 'Time(indnok stuff): %.8fs' % (time.time()-t_start)
indok2 = np.nonzero(rstat)[0]
new_pi[:] = rep.ep_pi
new_beta[:] = rep.ep_beta
# 'nu0', 'alpha0' must remain full size
if indok2.shape[0] < mm:
nu = nu0[indok2].copy()
alpha = alpha0[indok2].copy()
else:
nu = nu0
alpha = alpha0
# Just a sanity check (this should not fire)
tvec = 1. - nu*crho[indok2]
indok3 = np.nonzero(tvec >= 1e-7)[0]
if indok3.shape[0] < indok2.shape[0]:
print 'UUPS[EPCoupParallelInfDriver.inference]: On %d' % (indok2.shape[0]-indok3.shape[0])
tvec = tvec[indok3].copy()
nu = nu[indok3].copy()
alpha = alpha[indok3].copy()
indok2 = indok2[indok3].copy()
if indok2.shape[0] < mm:
indok = potman.updind[indok2].copy()
else:
indok = potman.updind
new_pi[indok] = nu/tvec
new_beta[indok] = (cmu[indok2]*nu + alpha)/tvec
#t_stop=time.time()
#print 'Time(inference:updpars1): %.8fs' % (t_stop-t_start)
#t_start=time.time()
# Damping
if opts.damp > 0.:
new_pi[indok] = (1.-opts.damp)*new_pi[indok] + \
opts.damp*rep.ep_pi[indok]
new_beta[indok] = (1.-opts.damp)*new_beta[indok] + \
opts.damp*rep.ep_beta[indok]
nskip = mm - indok.shape[0] # Number of skips
# Recompute representation (refresh)
# Posterior covariance is kept in 'rep.post_cov'
indok = potman.updind
old_margs[:mm] = rep.marg_means[indok]
old_margs[mm:] = np.sqrt(rep.marg_vars[indok])
rep.setpi(new_pi)
rep.setbeta(new_beta)
#t_stop=time.time()
#print 'Time(inference:updpars2): %.8fs' % (t_stop-t_start)
rep.refresh()
new_margs[:mm] = rep.marg_means[indok]
new_margs[mm:] = np.sqrt(rep.marg_vars[indok])
res.delta = helpers.maxreldiff(old_margs,new_margs)
# End of sweep: Write results
res.nskip += nskip
if opts.res_det:
res_det.delta.append(res.delta)
res_det.nskip.append(nskip)
if opts.verbose>0:
print 'It. %d: delta=%f, nskip=%d' % (res.nit,res.delta,nskip)
if do_teststats:
self._binclass_print_teststats(opts.bc_testmodel,targets,
opts.imode)
# Convergence?
if res.delta < opts.deltaeps:
res.rstat = 0
break
#t_stop1=time.time()
#print 'Time(inference::sweep): %.8fs' % (t_stop1-t_start1)
# Timing
#t_stop0=time.time()
#print 'Time(inference(ALL)): %.8fs' % (t_stop0-t_start0)
# Return stuff
if opts.res_det:
return (res, res_det)
else:
return res
def predict(self,pmodel,opts):
# Make sure that 'opts.imode' is correct
opts.imode = 'CoupParallel'
return CoupledInfDriver.predict(self,pmodel,opts)
class EPCoupSequentialInfDriver(CoupledInfDriver):
"""
EPCoupSequentialInfDriver
=========================
Implements sequential updating expectation propagation in 'inference'.
The representation is updated after each local EP update, using a
Cholesky update/downdate. This is much slower than parallel updating
in general, but may converge more reliably.
    If 'rep.keep_margs'==True, the marginals are kept up-to-date at all
times. Right now, this is a waste of time.
TODO: Implement optimized update scheduling, based on forward scoring
and marginal moments.
"""
def __init__(self,model,rep):
CoupledInfDriver.__init__(self,model,rep)
def inference(self,opts):
"""
Update representation by running sequential updating EP. In a sweep,
we iterate over all potentials in model.potman.updind (non-Gaussians)
in random ordering. 'opts' attributes:
- maxit: Maximum number of sweeps
- deltaeps: Threshold for convergence (statistic based on relative
change of Gaussian means and stddevs.)
- damp: Damping constant (def.: 0 -> no damping)
On top of this, we apply selective damping to make sure that
1 + (Delta pi_k) margvar_k >= caveps
- caveps: Update on k is skipped if
cavvar_k / margvar_k > 1/caveps,
or if there is some numerical failure
- skipeps: Update is skipped if absolute change in pi_k is smaller
than 'skipeps'
- refresh: Refresh representation after each sweep? Def.: True
- upd_1stsweep: Optional. Set of str. If given, in the 1st sweep, we
only update on potentials whose type name is contained in the set.
- res_det: Return detailed results in 'res_det' (below)? Def.: False
- verbose: Verbosity level (0: no messages, 1: some messages). Def.: 0
- bc_testmodel: Optional. See EPCoupParallelInfDriver.inference.
Returns 'res' or '(res, res_det)' (latter if 'opts.res_det'==True).
Each update results in a skip status, summarized in 'nskip'
histograms:
- 0: Not skipped
- 1: Skipped due to cavity marginal ('caveps') or local EP failure
        - 2: Skipped due to small change ('skipeps', selective damping)
- 3: Skipped due to Cholesky up/downdate error
'res' has attributes:
- rstat: Return status (0: Converged to 'deltaeps'; 1: Done
'maxit' sweeps)
- nit: Number of sweeps done
- delta: Value convergence statistic after last sweep
- nskip: Skip status histogram (vector of size 4), summed over all
updates and sweeps
'res_det' attributes (optional):
- delta: Value after each sweep
- nskip: Matrix, each row skip status histogram for a sweep
"""
opts.imode = 'CoupSequential'
self._infer_check_commonargs(opts)
try:
if not (isinstance(opts.skipeps,numbers.Real) and
opts.skipeps>0.):
raise TypeError('OPTS.SKIPEPS wrong')
except AttributeError:
opts.skipeps = 1e-8
try:
if not isinstance(opts.upd_1stsweep,set):
raise TypeError('OPTS.UPD_1STSWEEP wrong')
do_1stsweep = True
except AttributeError:
do_1stsweep = False
# Initialization
res = helpers.Struct()
res.rstat = 1
res.nskip = np.zeros(4,dtype=np.int32)
if opts.res_det:
res_det = helpers.Struct()
res_det.delta = []
res_det.nskip = []
bfact = self.model.bfact
potman = self.model.potman
rep = self.rep
m, n = bfact.shape()
potman.check_internal()
if do_1stsweep:
ind_swp1 = set(potman.filterpots(opts.upd_1stsweep))
try:
targets = self._binclass_assemble_targets(opts)
do_teststats = True
except AttributeError:
do_teststats = False
# Loop over sweeps
vvec = np.empty(n)
for res.nit in range(1,opts.maxit+1):
updind = np.random.permutation(potman.updind)
if do_1stsweep and res.nit==1:
updind = [x for x in updind if x in ind_swp1]
if len(updind)==0:
raise IndexError('UPDIND empty: No potentials to update on?')
# Loop over potentials in UPDIND
nskip = [0]*4
delta = 0.
for j in updind:
# np.int32 not instanceof numbers.Integral (sucks!)
j = int(j)
# Compute cavity marginals
ep_pi = rep.ep_pi[j]
ep_beta = rep.ep_beta[j]
do_skip = 0
# vvec = L^-1 B[j,:] required below
(mu, rho) = rep.get_marg(j,vvec)
tscal = 1. - ep_pi*rho
if tscal >= opts.caveps:
crho = rho/tscal
cmu = (mu - ep_beta*rho)/tscal
# Local EP update
(rstat, alpha, nu, logz) \
= epx.epupdate_single_pman(potman.potids,potman.numpot,
potman.parvec,
potman.parshrd,
potman.annobj,j,cmu,crho)
if rstat == 0:
do_skip = 1 # Local EP update failed
else:
                        tscal = 1. - nu*crho
if tscal>=1e-7:
new_pi = nu/tscal
new_beta = (cmu*nu + alpha)/tscal
else:
do_skip = 1 # Local EP update failed
else:
do_skip = 1 # Cavity marginal invalid
if do_skip == 0:
# Damping
dfl_pi = new_pi-ep_pi # Full update
dfl_beta = new_beta-ep_beta
delpi = (1.-opts.damp)*dfl_pi
delbeta = (1.-opts.damp)*dfl_beta
delpi2 = delpi; delbeta2 = delbeta
# Selective damping
if delpi*rho + 1. < opts.caveps:
delpi = (opts.caveps-1.)/rho
delbeta = (delpi/dfl_pi)*delbeta
new_pi = ep_pi+delpi
new_beta = ep_beta+delbeta
if abs(delpi)>=opts.skipeps:
# Update representation
try:
rep.update_single(j,delpi,delbeta,vvec)
except sla.LinAlgError:
do_skip = 3 # Numerical error Cholesky up/down
else:
# Small |delpi| counted as skip only if due to
# selective damping
do_skip = 2 if abs(delpi2)>=opts.skipeps else 4
nskip[do_skip if do_skip<4 else 0] += 1
if do_skip == 0:
hrho = crho*(1. - nu*crho)
hmu = cmu + alpha*crho
delta = max(delta,
helpers.maxreldiff(np.array([hmu,
np.sqrt(hrho)]),
np.array([mu,
np.sqrt(rho)])))
# Write back results
res.nskip += np.array(nskip,dtype=np.int32)
res.delta = delta
if opts.res_det:
res_det.delta.append(delta)
res_det.nskip.append(nskip)
if opts.refresh:
rep.refresh()
if opts.verbose>0:
print 'It. %d: delta=%f, nnskip=%d' % (res.nit,res.delta,
sum(nskip[1:]))
print ' nskip=', nskip
if do_teststats:
self._binclass_print_teststats(opts.bc_testmodel,targets,
opts.imode)
if res.delta < opts.deltaeps:
res.rstat = 0
break
# Return stuff
if opts.res_det:
return (res, res_det)
else:
return res
class EPFactorizedInfDriver(InfDriver):
"""
EPFactorizedInfDriver
=====================
Implements expectation propagation inference in factorized mode.
"""
def __init__(self,model,rep):
if not isinstance(model,ut.ModelFactorized):
raise TypeError('MODEL must be instance of apbsint.ModelFactorized')
if not isinstance(rep,ut.RepresentationFactorized):
raise TypeError('REP must be instance of apbsint.RepresentationFactorized')
InfDriver.__init__(self,model,rep)
def init(self,mode,refresh=True,cav_var=1.):
"""
Initialize EP parameters according to mode 'mode'. The representation
is refreshed afterwards iff 'refresh'==True. Modes:
- 'ADF': Parameters for all non-Gaussian potentials set to zero. For
Gaussian potentials, we use a heuristic which depends on 'cav_var'
(see technical report).
NOTE: For a potential j with V_j = {i} and B[j,i] = 1, the Gaussian
potential is represented exactly (independent of 'cav_var'), and the
EP parameters remain fixed there.
"""
if mode.upper() == 'ADF':
bfact = self.model.bfact
bmat = bfact.get_mat()
potman = self.model.potman
rep = self.rep
m, n = bfact.shape()
potman.check_internal()
ep_pi = np.zeros(rep.size_pars())
ep_beta = np.zeros(rep.size_pars())
off = 0
for el in potman.elem:
numk = el.size
if el.name == 'Gaussian':
# If potential is N(s | y_j,ssq_j) and cv=='cav_var':
# pi_ji = b_ji^2 / ( (|V_j|-1) cv + ssq_j )
# beta_ji = b_ji y_j / ( (|V_j|-1) cv + ssq_j )
# Offset into EP parameter vectors:
off2 = bmat[:off].getnnz() if off>0 else 0
mx_tmp = bfact.b2fact[off:off+numk].copy()
sz2 = mx_tmp.getnnz()
# Number of nonzeros per row minus 1:
vjsz = mx_tmp.indptr[1:] - mx_tmp.indptr[:-1] - 1
tvec = 1./(cav_var*vjsz + el.pars[1])
mx_dg = ssp.diags(tvec,0)
mx_tmp = mx_dg * mx_tmp
ep_pi[off2:off2+sz2] = mx_tmp.data
mx_tmp = bmat[off:off+numk].copy()
assert mx_tmp.getnnz() == sz2
# Some y_j's could be zero, which would change the sparsity
# pattern. Have to go a detour here
nzind = mx_tmp.nonzero()
tvec *= el.pars[0]
mx_dg = ssp.diags(tvec,0)
mx_tmp = mx_dg * mx_tmp
ep_beta[off2:off2+sz2] = mx_tmp[nzind[0],nzind[1]]
off += numk
rep.setpi(ep_pi)
rep.setbeta(ep_beta)
if refresh:
rep.refresh()
else:
raise ValueError("Unknown mode '" + mode + "'")
def predict(self,pmodel,opts):
"""
Prediction on test model 'pmodel' (type apbsint.ModelFactorized).
'opts' is a struct with attributes:
- ptype: What predictive moments are returned?
0: Gaussian means h_q
1: Gaussian moments (h_q, rho_q)
2: Predictive moments (logz, h_p, rho_p)
3: Everything (h_q, rho_q, logz, h_p, rho_p)
Here, the predictive marginal at a test point is
p(s) = Z^-1 t(s) q(s),
where t(s) is the potential, q(s) the Gaussian marginal. 'logz'
returns the log Z values
"""
model = self.model
rep = self.rep
if not isinstance(pmodel,ut.ModelFactorized):
raise TypeError('PMODEL must be instance of apbsint.ModelFactorized')
pbfact = pmodel.bfact
pm, n = pbfact.shape()
if n != model.bfact.shape(1):
raise TypeError('PMODEL, MODEL: Different number of variables')
if not (isinstance(opts.ptype,numbers.Integral) and opts.ptype>=0 and
opts.ptype<=3):
raise ValueError('opts.ptype wrong')
# Compute Gaussian moments
h_q = np.empty(pm)
rho_q = np.empty(pm) if opts.ptype>0 else None
rep.predict(pbfact,h_q,rho_q)
if opts.ptype==0:
return h_q
elif opts.ptype==1:
return (h_q, rho_q)
if opts.ptype==2:
res = ()
else:
res = (h_q, rho_q)
return res + self._predict_epcomp(pmodel,h_q,rho_q)
def inference(self,opts):
"""
Update representation by running sweeps of EP updates. In one sweep,
we iterate sequentially over all potentials in random ordering.
Selective damping is used (and the SD representation updated) iff
activated (see apbsint.RepresentationFactorized).
'opts' attributes:
- maxit: Maximum number of sweeps
- deltaeps: Threshold for convergence (statistic based on relative
change of Gaussian means and stddevs.)
- damp: Damping constant (def.: 0 -> no damping)
- piminthres: pi values for cavity moments must be > (.)/2 for an
update not to fail. If selective damping is active, it aims to
enforce that future cavity pi values are >= (.).
Def.: 1e-8
- refresh: If True, marginals are refreshed from messages after each
sweep. Def.: True
- skip_gauss: If True, EP updates are not done on potentials of type
'Gaussian'. Def.: False
- upd_1stsweep: See apbsint.EPCoupSequentialInfDriver.inference.
Optional
- res_det: Return detailed results in 'res_det' (below)? Def.: False
- verbose: Verbosity level (0: no messages, 1: some messages). Def.: 0
- bc_testmodel: See apbsint.EPCoupParallelInfDriver.inference.
Optional
Returns 'res' or '(res, res_det)' (latter if 'opts.res_det'==True).
Each update results in a skip status, summarized in 'nskip'
histograms:
- 0: Not skipped
- 1: Skipped due to invalid cavity marginal ('piminthres')
- 2: Skipped due to local EP update error
- 3: Skipped due to invalid new marginal ('piminthres')
- 4: Skipped due to selective damping
'res' attributes:
- rstat: Return status (0: Converged to 'deltaeps'; 1: Done
'maxit' sweeps)
- nit: Number of sweeps done
        - delta: Value of the convergence statistic after the last sweep
- nskip: Skip status histogram (vector of size 5), summed over all
updates and sweeps
- nsdamp: Only if selective damping active. Number of non-skipped
updates which were selectively damped
'res_det' attributes (optional):
- delta: Value after each sweep
- nskip: Matrix, each row skip status histogram for a sweep
- nsdamp: S.a. Value for each sweep
"""
opts.imode = 'Factorized'
self._infer_check_commonargs(opts)
try:
            if not (isinstance(opts.piminthres,numbers.Real) and
                    opts.piminthres>0.):
raise TypeError('OPTS.PIMINTHRES wrong')
except AttributeError:
opts.piminthres = 1e-8
try:
if not isinstance(opts.upd_1stsweep,set):
raise TypeError('OPTS.UPD_1STSWEEP wrong')
do_1stsweep = True
except AttributeError:
do_1stsweep = False
try:
if not isinstance(opts.skip_gauss,bool):
raise TypeError('OPTS.SKIP_GAUSS wrong')
except AttributeError:
opts.skip_gauss = False
# Initialization
bfact = self.model.bfact
potman = self.model.potman
rep = self.rep
m, n = bfact.shape()
potman.check_internal()
try:
do_seldamp = (rep.sd_numk>0)
except AttributeError:
do_seldamp = False
res = helpers.Struct()
res.rstat = 1
res.nskip = np.zeros(5,dtype=np.int32)
if do_seldamp:
res.nsdamp = 0
if opts.res_det:
res_det = helpers.Struct()
res_det.delta = []
res_det.nskip = []
res_det.nsdamp = []
if do_1stsweep:
ind_swp1 = set(potman.filterpots(opts.upd_1stsweep))
try:
targets = self._binclass_assemble_targets(opts)
do_teststats = True
except AttributeError:
do_teststats = False
# DEBUG:
# If 'opts.deb_matcomp_fname' is given, we directly compare against
# intermediate results stored by Matlab. This is a file name string
# with %d for 'res.nit' (sweep number; starting 1). The index
# 'updind' is also loaded from that file
try:
if len(opts.deb_matcomp_fname)==0:
raise ValueError('OPTS.DEB_MATCOMP_FNAME wrong')
do_deb_matcomp = True
except AttributeError:
do_deb_matcomp = False
# Loop over sweeps
for res.nit in range(1,opts.maxit+1):
if not do_deb_matcomp:
if not opts.skip_gauss:
updind = np.int32(np.random.permutation(m))
else:
updind = np.random.permutation(potman.updind)
if do_1stsweep and res.nit==1:
# NOTE: This could be very slow...
updind = np.array([x for x in updind if x in ind_swp1],
dtype=np.int32)
if updind.shape[0]==0:
raise IndexError('UPDIND empty: No potentials to update on?')
else:
deb_mc = scipy.io.loadmat(opts.deb_matcomp_fname % res.nit)
updind = deb_mc['updind'].ravel()
# Everything is done by epx.fact_sequpdates
sz = updind.shape[0]
rstat = np.empty(sz,dtype=np.int32)
delta = np.empty(sz)
if not do_seldamp:
epx.fact_sequpdates(n,m,updind,potman.potids,potman.numpot,
potman.parvec,potman.parshrd,potman.annobj,
bfact.rowind,bfact.colind,bfact.bvals,
rep.ep_pi,rep.ep_beta,rep.marg_pi,
rep.marg_beta,opts.piminthres,opts.damp,
rstat,delta)
else:
sd_dampfact = np.empty(sz)
sd_nupd, sd_nrec = \
epx.fact_sequpdates(n,m,updind,potman.potids,potman.numpot,
potman.parvec,potman.parshrd,
potman.annobj,bfact.rowind,
bfact.colind,bfact.bvals,rep.ep_pi,
rep.ep_beta,rep.marg_pi,rep.marg_beta,
opts.piminthres,opts.damp,rstat,delta,
rep.sd_numvalid,rep.sd_topind,
rep.sd_topval,rep.sd_subind,
rep.sd_subexcl,sd_dampfact)
# Among non-skipped updates, count those for which SD_DAMPFACT
# larger than OPTS.DAMP
nsdamp = np.sum(sd_dampfact[np.nonzero(rstat==0)] > opts.damp)
res.nsdamp += nsdamp
if opts.res_det:
res_det.nsdamp.append(nsdamp)
res.delta = max(delta)
nskip = [0]*5
for k in xrange(5):
nskip[k] = np.sum(rstat==k)
res.nskip += np.array(nskip,dtype=np.int32)
if opts.res_det:
res_det.delta.append(res.delta)
res_det.nskip.append(nskip)
if opts.refresh:
rep.refresh()
if opts.verbose>0:
print 'It. %d: delta=%f, nnskip=%d' % (res.nit,res.delta,
sum(nskip[1:]))
if do_seldamp:
print ' nskip=', nskip, ', nsdamp=%d' % nsdamp
else:
print ' nskip=', nskip
if do_teststats:
self._binclass_print_teststats(opts.bc_testmodel,targets,
opts.imode)
# DEBUG
if do_deb_matcomp:
deb_ep_pi = deb_mc['ep_pi'].ravel()
deb_ep_beta = deb_mc['ep_beta'].ravel()
deb_marg_pi = deb_mc['marg_pi'].ravel()
deb_marg_beta = deb_mc['marg_beta'].ravel()
print ('DEBUG[%d]: df(ep_pi)=%.4e, df(ep_beta)=%.4e, ' +
'df(m_ep)=%.4e, df(m_beta)=%.4e') % \
(res.nit, helpers.maxreldiff(rep.ep_pi,deb_ep_pi),
helpers.maxreldiff(rep.ep_beta,deb_ep_beta),
helpers.maxreldiff(rep.marg_pi,deb_marg_pi),
helpers.maxreldiff(rep.marg_beta,deb_marg_beta))
# TODO: Plot absolute differences means, stddevs (as in Matlab)
if res.delta < opts.deltaeps:
res.rstat = 0
break
# Return stuff
if opts.res_det:
return (res, res_det)
else:
return res
# Testcode (really basic)
#if __name__ == "__main__":
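#    # A minimal usage sketch (assumes 'model' and 'rep' were built elsewhere
#    # as apbsint.ModelFactorized / apbsint.RepresentationFactorized instances;
#    # option names follow the 'inference' docstring above):
#    driver = EPFactorizedInfDriver(model, rep)
#    driver.init('ADF', refresh=True)
#    opts = helpers.Struct()
#    opts.maxit = 50       # maximum number of sweeps
#    opts.deltaeps = 1e-5  # convergence threshold
#    opts.damp = 0.        # no damping
#    opts.res_det = False
#    opts.verbose = 1
#    res = driver.inference(opts)
#    print 'Converged' if res.rstat == 0 else 'Stopped after maxit sweeps'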
|
|
# -*- coding: utf-8 -*-
"""
Django Extensions additional model fields
"""
import re
import six
import string
import warnings
try:
import uuid
HAS_UUID = True
except ImportError:
HAS_UUID = False
try:
import shortuuid
HAS_SHORT_UUID = True
except ImportError:
HAS_SHORT_UUID = False
from django.core.exceptions import ImproperlyConfigured
from django.db.models import DateTimeField, CharField, SlugField
from django.template.defaultfilters import slugify
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text
MAX_UNIQUE_QUERY_ATTEMPTS = 100
class UniqueFieldMixin(object):
def check_is_bool(self, attrname):
if not isinstance(getattr(self, attrname), bool):
raise ValueError("'{}' argument must be True or False".format(attrname))
@staticmethod
def _get_fields(model_cls):
return [
(f, f.model if f.model != model_cls else None) for f in model_cls._meta.get_fields()
if not f.is_relation or f.one_to_one or (f.many_to_one and f.related_model)
]
def get_queryset(self, model_cls, slug_field):
for field, model in self._get_fields(model_cls):
if model and field == slug_field:
return model._default_manager.all()
return model_cls._default_manager.all()
def find_unique(self, model_instance, field, iterator, *args):
# exclude the current model instance from the queryset used in finding
# next valid hash
queryset = self.get_queryset(model_instance.__class__, field)
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
        # form a kwarg dict used to implement any unique_together constraints
kwargs = {}
for params in model_instance._meta.unique_together:
if self.attname in params:
for param in params:
kwargs[param] = getattr(model_instance, param, None)
new = six.next(iterator)
kwargs[self.attname] = new
while not new or queryset.filter(**kwargs):
new = six.next(iterator)
kwargs[self.attname] = new
setattr(model_instance, self.attname, new)
return new
class AutoSlugField(UniqueFieldMixin, SlugField):
""" AutoSlugField
By default, sets editable=False, blank=True.
Required arguments:
populate_from
Specifies which field or list of fields the slug is populated from.
Optional arguments:
separator
Defines the used separator (default: '-')
overwrite
If set to True, overwrites the slug on every save (default: False)
Inspired by SmileyChris' Unique Slugify snippet:
http://www.djangosnippets.org/snippets/690/
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('editable', False)
populate_from = kwargs.pop('populate_from', None)
if populate_from is None:
raise ValueError("missing 'populate_from' argument")
else:
self._populate_from = populate_from
self.slugify_function = kwargs.pop('slugify_function', slugify)
self.separator = kwargs.pop('separator', six.u('-'))
self.overwrite = kwargs.pop('overwrite', False)
self.check_is_bool('overwrite')
self.allow_duplicates = kwargs.pop('allow_duplicates', False)
self.check_is_bool('allow_duplicates')
super(AutoSlugField, self).__init__(*args, **kwargs)
def _slug_strip(self, value):
"""
Cleans up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
def slugify_func(self, content):
if content:
return self.slugify_function(content)
return ''
def slug_generator(self, original_slug, start):
yield original_slug
for i in range(start, MAX_UNIQUE_QUERY_ATTEMPTS):
slug = original_slug
end = '%s%s' % (self.separator, i)
end_len = len(end)
if self.slug_len and len(slug) + end_len > self.slug_len:
slug = slug[:self.slug_len - end_len]
slug = self._slug_strip(slug)
slug = '%s%s' % (slug, end)
yield slug
raise RuntimeError('max slug attempts for %s exceeded (%s)' %
(original_slug, MAX_UNIQUE_QUERY_ATTEMPTS))
def create_slug(self, model_instance, add):
# get fields to populate from and slug field to set
if not isinstance(self._populate_from, (list, tuple)):
self._populate_from = (self._populate_from, )
slug_field = model_instance._meta.get_field(self.attname)
if add or self.overwrite:
# slugify the original field content and set next step to 2
slug_for_field = lambda field: self.slugify_func(getattr(model_instance, field))
slug = self.separator.join(map(slug_for_field, self._populate_from))
start = 2
else:
# get slug from the current model instance
slug = getattr(model_instance, self.attname)
# model_instance is being modified, and overwrite is False,
# so instead of doing anything, just return the current slug
return slug
# strip slug depending on max_length attribute of the slug field
# and clean-up
self.slug_len = slug_field.max_length
if self.slug_len:
slug = slug[:self.slug_len]
slug = self._slug_strip(slug)
original_slug = slug
if self.allow_duplicates:
setattr(model_instance, self.attname, slug)
return slug
return super(AutoSlugField, self).find_unique(
model_instance, slug_field, self.slug_generator(original_slug, start))
def pre_save(self, model_instance, add):
value = force_text(self.create_slug(model_instance, add))
return value
def get_internal_type(self):
return "SlugField"
def deconstruct(self):
name, path, args, kwargs = super(AutoSlugField, self).deconstruct()
kwargs['populate_from'] = self._populate_from
if not self.separator == six.u('-'):
kwargs['separator'] = self.separator
if self.overwrite is not False:
kwargs['overwrite'] = True
if self.allow_duplicates is not False:
kwargs['allow_duplicates'] = True
return name, path, args, kwargs
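# A minimal usage sketch for AutoSlugField (field and model names are
# illustrative; 'models' is assumed to come from django.db):
#
#     class Article(models.Model):
#         title = models.CharField(max_length=100)
#         # slug is built from 'title'; pass overwrite=True to regenerate it
#         # on every save instead of only on creation
#         slug = AutoSlugField(populate_from='title', separator='-')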
class RandomCharField(UniqueFieldMixin, CharField):
""" RandomCharField
By default, sets editable=False, blank=True, unique=False.
Required arguments:
length
Specifies the length of the field
Optional arguments:
unique
If set to True, duplicate entries are not allowed (default: False)
lowercase
If set to True, lowercase the alpha characters (default: False)
uppercase
If set to True, uppercase the alpha characters (default: False)
include_alpha
If set to True, include alpha characters (default: True)
include_digits
If set to True, include digit characters (default: True)
include_punctuation
If set to True, include punctuation characters (default: False)
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('editable', False)
self.length = kwargs.pop('length', None)
if self.length is None:
raise ValueError("missing 'length' argument")
kwargs['max_length'] = self.length
self.lowercase = kwargs.pop('lowercase', False)
self.check_is_bool('lowercase')
self.uppercase = kwargs.pop('uppercase', False)
self.check_is_bool('uppercase')
if self.uppercase and self.lowercase:
raise ValueError("the 'lowercase' and 'uppercase' arguments are mutually exclusive")
self.include_digits = kwargs.pop('include_digits', True)
self.check_is_bool('include_digits')
self.include_alpha = kwargs.pop('include_alpha', True)
self.check_is_bool('include_alpha')
self.include_punctuation = kwargs.pop('include_punctuation', False)
self.check_is_bool('include_punctuation')
# Set unique=False unless it's been set manually.
if 'unique' not in kwargs:
kwargs['unique'] = False
super(RandomCharField, self).__init__(*args, **kwargs)
def random_char_generator(self, chars):
for i in range(MAX_UNIQUE_QUERY_ATTEMPTS):
yield ''.join(get_random_string(self.length, chars))
raise RuntimeError('max random character attempts exceeded (%s)' %
MAX_UNIQUE_QUERY_ATTEMPTS)
def pre_save(self, model_instance, add):
if not add and getattr(model_instance, self.attname) != '':
return getattr(model_instance, self.attname)
population = ''
if self.include_alpha:
if self.lowercase:
population += string.ascii_lowercase
elif self.uppercase:
population += string.ascii_uppercase
else:
population += string.ascii_letters
if self.include_digits:
population += string.digits
if self.include_punctuation:
population += string.punctuation
random_chars = self.random_char_generator(population)
if not self.unique:
new = six.next(random_chars)
setattr(model_instance, self.attname, new)
return new
return super(RandomCharField, self).find_unique(
model_instance,
model_instance._meta.get_field(self.attname),
random_chars,
)
    def get_internal_type(self):
return "CharField"
def deconstruct(self):
name, path, args, kwargs = super(RandomCharField, self).deconstruct()
kwargs['length'] = self.length
del kwargs['max_length']
if self.lowercase is True:
kwargs['lowercase'] = self.lowercase
if self.uppercase is True:
kwargs['uppercase'] = self.uppercase
if self.include_alpha is False:
kwargs['include_alpha'] = self.include_alpha
if self.include_digits is False:
kwargs['include_digits'] = self.include_digits
if self.include_punctuation is True:
kwargs['include_punctuation'] = self.include_punctuation
if self.unique is True:
kwargs['unique'] = self.unique
return name, path, args, kwargs
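# A minimal usage sketch for RandomCharField (names are illustrative; 'models'
# is assumed to come from django.db):
#
#     class Coupon(models.Model):
#         # 12-character code drawn from letters and digits; unique=True makes
#         # the mixin retry until an unused value is found
#         code = RandomCharField(length=12, unique=True)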
class CreationDateTimeField(DateTimeField):
""" CreationDateTimeField
By default, sets editable=False, blank=True, auto_now_add=True
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
kwargs.setdefault('blank', True)
kwargs.setdefault('auto_now_add', True)
DateTimeField.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "DateTimeField"
def deconstruct(self):
name, path, args, kwargs = super(CreationDateTimeField, self).deconstruct()
if self.editable is not False:
kwargs['editable'] = True
if self.blank is not True:
kwargs['blank'] = False
if self.auto_now_add is not False:
kwargs['auto_now_add'] = True
return name, path, args, kwargs
class ModificationDateTimeField(CreationDateTimeField):
""" ModificationDateTimeField
By default, sets editable=False, blank=True, auto_now=True
Sets value to now every time the object is saved.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('auto_now', True)
DateTimeField.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "DateTimeField"
def deconstruct(self):
name, path, args, kwargs = super(ModificationDateTimeField, self).deconstruct()
if self.auto_now is not False:
kwargs['auto_now'] = True
return name, path, args, kwargs
def pre_save(self, model_instance, add):
if not getattr(model_instance, 'update_modified', True):
return model_instance.modified
return super(ModificationDateTimeField, self).pre_save(model_instance, add)
class UUIDVersionError(Exception):
pass
class UUIDField(CharField):
""" UUIDField
By default uses UUID version 4 (randomly generated UUID).
    The field supports all uuid versions which are natively supported by the uuid python module, except version 2.
For more information see: http://docs.python.org/lib/module-uuid.html
"""
DEFAULT_MAX_LENGTH = 36
def __init__(self, verbose_name=None, name=None, auto=True, version=4, node=None, clock_seq=None, namespace=None, uuid_name=None, *args, **kwargs):
warnings.warn("Django 1.8 features a native UUIDField, this UUIDField will be removed after Django 1.7 becomes unsupported.", DeprecationWarning)
if not HAS_UUID:
raise ImproperlyConfigured("'uuid' module is required for UUIDField. (Do you have Python 2.5 or higher installed ?)")
kwargs.setdefault('max_length', self.DEFAULT_MAX_LENGTH)
if auto:
self.empty_strings_allowed = False
kwargs['blank'] = True
kwargs.setdefault('editable', False)
self.auto = auto
self.version = version
self.node = node
self.clock_seq = clock_seq
self.namespace = namespace
self.uuid_name = uuid_name or name
super(UUIDField, self).__init__(verbose_name=verbose_name, *args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return uuid.uuid4()
elif self.version == 1:
return uuid.uuid1(self.node, self.clock_seq)
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
return uuid.uuid3(self.namespace, self.uuid_name)
elif self.version == 5:
return uuid.uuid5(self.namespace, self.uuid_name)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
def pre_save(self, model_instance, add):
value = super(UUIDField, self).pre_save(model_instance, add)
if self.auto and add and value is None:
value = force_text(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
else:
if self.auto and not value:
value = force_text(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
def formfield(self, **kwargs):
if self.auto:
return None
return super(UUIDField, self).formfield(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
if kwargs.get('max_length', None) == self.DEFAULT_MAX_LENGTH:
del kwargs['max_length']
if self.auto is not True:
kwargs['auto'] = self.auto
if self.version != 4:
kwargs['version'] = self.version
if self.node is not None:
kwargs['node'] = self.node
if self.clock_seq is not None:
kwargs['clock_seq'] = self.clock_seq
if self.namespace is not None:
kwargs['namespace'] = self.namespace
if self.uuid_name is not None:
            kwargs['uuid_name'] = self.uuid_name
return name, path, args, kwargs
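# A minimal usage sketch for UUIDField (names are illustrative; 'models' is
# assumed to come from django.db):
#
#     class Ticket(models.Model):
#         # random UUID4 assigned automatically on the first save
#         token = UUIDField(auto=True)
#         # name-based UUID5 derived from a namespace and uuid_name
#         page_id = UUIDField(version=5, namespace=uuid.NAMESPACE_URL,
#                             uuid_name='page_id')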
class PostgreSQLUUIDField(UUIDField):
def __init__(self, *args, **kwargs):
warnings.warn("Django 1.8 features a native UUIDField, this UUIDField will be removed after Django 1.7 becomes unsupported.", DeprecationWarning)
super(PostgreSQLUUIDField, self).__init__(*args, **kwargs)
def db_type(self, connection=None):
return "UUID"
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, six.integer_types):
value = uuid.UUID(int=value)
elif isinstance(value, (six.string_types, six.binary_type)):
if len(value) == 16:
value = uuid.UUID(bytes=value)
else:
value = uuid.UUID(value)
return super(PostgreSQLUUIDField, self).get_db_prep_value(
value, connection, prepared=False)
class ShortUUIDField(UUIDField):
""" ShortUUIDFied
Generates concise (22 characters instead of 36), unambiguous, URL-safe UUIDs.
Based on `shortuuid`: https://github.com/stochastic-technologies/shortuuid
"""
DEFAULT_MAX_LENGTH = 22
    def __init__(self, *args, **kwargs):
        if not HAS_SHORT_UUID:
            raise ImproperlyConfigured("'shortuuid' module is required for ShortUUIDField. (Do you have Python 2.5 or higher installed ?)")
        kwargs.setdefault('max_length', self.DEFAULT_MAX_LENGTH)
        super(ShortUUIDField, self).__init__(*args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return shortuuid.uuid()
elif self.version == 1:
return shortuuid.uuid()
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
raise UUIDVersionError("UUID version 3 is not supported.")
elif self.version == 5:
return shortuuid.uuid(name=self.namespace)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
|
|
"""Utilities for handling PDBQT files."""
from typing import Dict, List, Optional, Set, Tuple
from deepchem.utils.typing import RDKitMol
def pdbqt_to_pdb(filename: Optional[str] = None,
pdbqt_data: Optional[List[str]] = None) -> str:
"""Extracts the PDB part of a pdbqt file as a string.
Either `filename` or `pdbqt_data` must be provided. This function
strips PDBQT charge information from the provided input.
Parameters
----------
filename: str, optional (default None)
Filename of PDBQT file
pdbqt_data: List[str], optional (default None)
Raw list of lines containing data from PDBQT file.
Returns
-------
pdb_block: str
String containing the PDB portion of pdbqt file.
"""
if filename is not None and pdbqt_data is not None:
raise ValueError("Only one of filename or pdbqt_data can be provided")
elif filename is None and pdbqt_data is None:
raise ValueError("Either filename or pdbqt_data must be provided")
elif filename is not None:
pdbqt_data = open(filename).readlines()
pdb_block = ""
# FIXME: Item "None" of "Optional[List[str]]" has no attribute "__iter__" (not iterable)
for line in pdbqt_data: # type: ignore
pdb_block += "%s\n" % line[:66]
return pdb_block
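# Example (a sketch; the file name is illustrative):
#
#     pdb_block = pdbqt_to_pdb(filename='receptor.pdbqt')
#     with open('receptor.pdb', 'w') as f:
#       f.write(pdb_block)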
def convert_protein_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
"""Convert a protein PDB file into a pdbqt file.
Writes the extra PDBQT terms directly to `outfile`.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
Protein molecule
outfile: str
filename which already has a valid pdb representation of mol
"""
lines = [x.strip() for x in open(outfile).readlines()]
out_lines = []
for line in lines:
if "ROOT" in line or "ENDROOT" in line or "TORSDOF" in line:
out_lines.append("%s\n" % line)
continue
if not line.startswith("ATOM"):
continue
line = line[:66]
atom_index = int(line[6:11])
atom = mol.GetAtoms()[atom_index - 1]
line = "%s +0.000 %s\n" % (line, atom.GetSymbol().ljust(2))
out_lines.append(line)
with open(outfile, 'w') as fout:
for line in out_lines:
fout.write(line)
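# Example (a sketch; file names are illustrative). The target file must already
# hold a plain PDB dump of the molecule before the PDBQT columns are appended:
#
#     from rdkit import Chem
#     protein = Chem.MolFromPDBFile('receptor.pdb', removeHs=False)
#     Chem.MolToPDBFile(protein, 'receptor.pdbqt')
#     convert_protein_to_pdbqt(protein, 'receptor.pdbqt')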
def _mol_to_graph(mol: RDKitMol):
"""Convert RDKit Mol to NetworkX graph
  Convert mol into a graph representation in which atoms are nodes and
  bonds are edges.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule to convert into a graph.
Returns
-------
graph: networkx.Graph
    Contains atom indices as nodes and bonds as edges.
Notes
-----
This function requires NetworkX to be installed.
"""
try:
import networkx as nx
except ModuleNotFoundError:
raise ImportError("This function requires NetworkX to be installed.")
G = nx.Graph()
num_atoms = mol.GetNumAtoms()
G.add_nodes_from(range(num_atoms))
for i in range(mol.GetNumBonds()):
from_idx = mol.GetBonds()[i].GetBeginAtomIdx()
to_idx = mol.GetBonds()[i].GetEndAtomIdx()
G.add_edge(from_idx, to_idx)
return G
def _get_rotatable_bonds(mol: RDKitMol) -> List[Tuple[int, int]]:
"""
https://github.com/rdkit/rdkit/blob/f4529c910e546af590c56eba01f96e9015c269a6/Code/GraphMol/Descriptors/Lipinski.cpp#L107
  Taken from RDKit source to find which bonds are rotatable. Rotatable
  bonds are stored as (from_atom, to_atom) tuples.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
Ligand molecule
Returns
-------
  rotatable_bonds: List[Tuple[int, int]]
List of rotatable bonds in molecule
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
from rdkit.Chem import rdmolops
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
pattern = Chem.MolFromSmarts(
"[!$(*#*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])("
"[CH3])[CH3])&!$([CD3](=[N,O,S])-!@[#7,O,S!D1])&!$([#7,O,S!D1]-!@[CD3]="
"[N,O,S])&!$([CD3](=[N+])-!@[#7!D1])&!$([#7!D1]-!@[CD3]=[N+])]-!@[!$(*#"
"*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])([CH3])"
"[CH3])]")
rdmolops.FastFindRings(mol)
rotatable_bonds = mol.GetSubstructMatches(pattern)
return rotatable_bonds
def convert_mol_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
"""Writes the provided ligand molecule to specified file in pdbqt format.
Creates a torsion tree and write to pdbqt file. The torsion tree
represents rotatable bonds in the molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule whose value is stored in pdb format in outfile
outfile: str
    Filename for a valid pdb file with the extension .pdbqt
Notes
-----
This function requires NetworkX to be installed.
"""
try:
import networkx as nx
except ModuleNotFoundError:
raise ImportError("This function requires NetworkX to be installed.")
# Walk through the original file and extract ATOM/HETATM lines and
# add PDBQT charge annotations.
pdb_map = _create_pdb_map(outfile)
graph = _mol_to_graph(mol)
rotatable_bonds = _get_rotatable_bonds(mol)
# Remove rotatable bonds from this molecule
for bond in rotatable_bonds:
graph.remove_edge(bond[0], bond[1])
# Get the connected components now that the rotatable bonds have
# been removed.
components = [x for x in nx.connected_components(graph)]
comp_map = _create_component_map(mol, components)
used_partitions = set()
lines = []
# The root is the largest connected component.
root = max(enumerate(components), key=lambda x: len(x[1]))[0]
# Write the root component
lines.append("ROOT\n")
for atom in components[root]:
lines.append(pdb_map[atom])
lines.append("ENDROOT\n")
# We've looked at the root, so take note of that
used_partitions.add(root)
for bond in rotatable_bonds:
valid, next_partition = _valid_bond(used_partitions, bond, root, comp_map)
if not valid:
continue
_dfs(used_partitions, next_partition, bond, components, rotatable_bonds,
lines, pdb_map, comp_map)
lines.append("TORSDOF %s" % len(rotatable_bonds))
with open(outfile, 'w') as fout:
for line in lines:
fout.write(line)
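# Example (a sketch; file names are illustrative). As with the protein variant,
# `outfile` must already contain a PDB dump of the ligand:
#
#     from rdkit import Chem
#     from rdkit.Chem import AllChem
#     ligand = Chem.AddHs(Chem.MolFromSmiles('CCOC(=O)c1ccccc1'))
#     AllChem.EmbedMolecule(ligand)  # PDB output needs 3D coordinates
#     Chem.MolToPDBFile(ligand, 'ligand.pdbqt')
#     convert_mol_to_pdbqt(ligand, 'ligand.pdbqt')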
def _create_pdb_map(outfile: str) -> Dict[int, str]:
"""Create a mapping from atom numbers to lines to write to pdbqt
This is a map from rdkit atom number to its line in the pdb
file. We also add the two additional columns required for
pdbqt (charge, symbol).
Note rdkit atoms are 0 indexed and pdb files are 1 indexed
Parameters
----------
outfile: str
filename which already has a valid pdb representation of mol
Returns
-------
pdb_map: Dict[int, str]
Maps rdkit atom numbers to lines to be written to PDBQT file.
"""
lines = [x.strip() for x in open(outfile).readlines()]
lines = list(
filter(lambda x: x.startswith("HETATM") or x.startswith("ATOM"), lines))
lines = [x[:66] for x in lines]
pdb_map = {}
for line in lines:
my_values = line.split()
atom_number = int(my_values[1])
atom_symbol = my_values[2]
atom_symbol = ''.join([i for i in atom_symbol if not i.isdigit()])
line = line.replace("HETATM", "ATOM ")
line = "%s +0.000 %s\n" % (line, atom_symbol.ljust(2))
pdb_map[atom_number - 1] = line
return pdb_map
def _create_component_map(mol: RDKitMol,
components: List[List[int]]) -> Dict[int, int]:
"""Creates a map from atom ids to disconnected component id
For each atom in `mol`, maps it to the id of the component in the
molecule. The intent is that this is used on a molecule whose
rotatable bonds have been removed. `components` is a list of the
connected components after this surgery.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule to find disconnected components in
components: List[List[int]]
List of connected components
Returns
-------
comp_map: Dict[int, int]
    Maps atom ids to component ids
"""
comp_map = {}
for i in range(mol.GetNumAtoms()):
for j in range(len(components)):
if i in components[j]:
comp_map[i] = j
break
return comp_map
def _dfs(used_partitions: Set[int], current_partition: int,
bond: Tuple[int, int], components: List[List[int]],
rotatable_bonds: List[Tuple[int, int]], lines: List[str],
pdb_map: Dict[int, str], comp_map: Dict[int, int]) -> List[str]:
"""
This function does a depth first search through the torsion tree
Parameters
----------
  used_partitions: Set[int]
Partitions which have already been used
current_partition: int
The current partition to expand
bond: Tuple[int, int]
the bond which goes from the previous partition into this partition
components: List[List[int]]
List of connected components
rotatable_bonds: List[Tuple[int, int]]
List of rotatable bonds. This tuple is (from_atom, to_atom).
lines: List[str]
List of lines to write
pdb_map: Dict[int, str]
Maps atom numbers to PDBQT lines to write
comp_map: Dict[int, int]
Maps atom numbers to component numbers
Returns
-------
lines: List[str]
List of lines to write. This has more appended lines.
"""
if comp_map[bond[1]] != current_partition:
bond = (bond[1], bond[0])
used_partitions.add(comp_map[bond[0]])
used_partitions.add(comp_map[bond[1]])
lines.append("BRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
for atom in components[current_partition]:
lines.append(pdb_map[atom])
for b in rotatable_bonds:
valid, next_partition = \
_valid_bond(used_partitions, b, current_partition, comp_map)
if not valid:
continue
lines = _dfs(used_partitions, next_partition, b, components,
rotatable_bonds, lines, pdb_map, comp_map)
lines.append("ENDBRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
return lines
def _valid_bond(used_partitions: Set[int], bond: Tuple[int, int],
current_partition: int,
comp_map: Dict[int, int]) -> Tuple[bool, int]:
"""Helper method to find next partition to explore.
Used to check if a bond goes from the current partition into a
partition that is not yet explored
Parameters
----------
  used_partitions: Set[int]
Partitions which have already been used
bond: Tuple[int, int]
The bond to check if it goes to an unexplored partition.
This tuple is (from_atom, to_atom).
current_partition: int
The current partition of the DFS
comp_map: Dict[int, int]
Maps atom ids to component ids
Returns
-------
is_valid: bool
    Whether to explore the next partition or not
next_partition: int
The next partition to explore
"""
part1 = comp_map[bond[0]]
part2 = comp_map[bond[1]]
if part1 != current_partition and part2 != current_partition:
return False, 0
if part1 == current_partition:
next_partition = part2
else:
next_partition = part1
return next_partition not in used_partitions, next_partition
|
|
"""This module provides a generic interface for tree models
It centers around the TreeModel, which is used for all kinds of trees.
The tree gets customized by the TreeItems. Each TreeItem holds
a specific ItemData subclass. The ItemData is responsible for
delivering the data. Make sure that all TreeItems in one hierarchy have the same ItemData subclasses or at least the same column_count.
If not, make sure the data method can handle columns outside their column count.
If you want to create a tree, create the needed itemdata classes,
create a root tree item that is parent for all top-level items.
The root item does not have to provide data, so the data might be None.
It is advised to use :class:`ListItemData` because the data in the list
will be used for the headers.
Then create the tree items with their appropriate data instances.
Finally create a tree model instance with the root tree item.
"""
import abc
from PySide import QtCore
class ItemData(object):
"""An abstract class that holds data and is used as an interface for TreeItems
When subclassing implement :meth:`ItemData.data` and :meth:`ItemData.column_count`.
It is advised to reimplement :meth:`ItemData.internal_data` too.
For editable models, check :meth:`ItemData.set_data`.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def data(self, column, role): # pragma: no cover
"""Return the data for the specified column and role
The column addresses one attribute of the data.
When used in a root item, the data should return the horizontal header
        data. When returning None, the section number (starting at 1) is used by the treemodel.
So if you want an empty header, return an empty string!
:param column: the data column
:type column: int
:param role: the data role
:type role: QtCore.Qt.ItemDataRole
:returns: data depending on the role
:rtype:
:raises: None
"""
pass
def set_data(self, column, value, role):
"""Set the data for the given column to value
The default implementation returns False
:param column: the column to set
:type column: int
:param value: the value to set
:param role: the role, usually EditRole
:type role: :class:`QtCore.Qt.ItemDataRole`
        :returns: True, if editing was successful
:rtype: :class:`bool`
:raises: None
"""
return False
@abc.abstractmethod
def column_count(self, ): # pragma: no cover
"""Return the number of columns that can be queried for data
:returns: the number of columns
:rtype: int
:raises: None
"""
pass
def internal_data(self, ):
"""Return the internal data of the ItemData
E.g. a ListItemData could return the list it uses, a
ProjectItemData could return the Project etc.
:returns: the data the itemdata uses as information
:rtype: None|arbitrary data
:raises: None
"""
return None
def flags(self, column):
"""Return the item flags for the item
Default is QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
:param column: the column to query
:type column: int
:returns: the item flags
:rtype: QtCore.Qt.ItemFlags
:raises: None
"""
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
class ListItemData(ItemData):
"""Item data for generic lists
Initialize it with a list of objects. Each element corresponds to a column.
For DisplayRole the objects are converted to strings with ``str()``.
"""
def __init__(self, liste, editable=False):
"""Initialize a new StringItemData with the given list
:param list: a list of objects, one for each column
:type list: list of objects
:param editable: If True, the list is editable
:type editable: :class:`bool`
:raises: None
"""
super(ListItemData, self).__init__()
self._list = liste
self._editable = editable
def data(self, column, role):
"""Return the data for the specified column and role
        For DisplayRole the element in the list will be converted to a string and returned.
:param column: the data column
:type column: int
:param role: the data role
:type role: QtCore.Qt.ItemDataRole
:returns: data depending on the role, or None if the column is out of range
:rtype: depending on the role or None
:raises: None
"""
if role == QtCore.Qt.DisplayRole:
if column >= 0 and column < len(self._list):
return str(self._list[column])
def set_data(self, column, value, role):
"""Set the data for the given column to value
The default implementation returns False
:param column: the column to set
:type column: int
:param value: the value to set
:param role: the role, usually EditRole
:type role: :class:`QtCore.Qt.ItemDataRole`
        :returns: True, if editing was successful
:rtype: :class:`bool`
:raises: None
"""
if role == QtCore.Qt.EditRole or role == QtCore.Qt.DisplayRole:
self._list[column] = value
return True
else:
return False
def column_count(self, ):
"""Return the number of columns that can be queried for data
:returns: the number of columns
:rtype: int
:raises: None
"""
return len(self._list)
def internal_data(self, ):
"""Return the list
:returns: the internal list
:rtype: :class:`list`
:raises: None
"""
return self._list
def flags(self, column):
"""Return the item flags for the item
Default is QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
:param column: the column to query
:type column: int
:returns: the item flags
:rtype: QtCore.Qt.ItemFlags
:raises: None
"""
flags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
if self._editable:
flags = flags | QtCore.Qt.ItemIsEditable
return flags
class TreeItem(object):
"""General TreeItem
You can represent a tree structure with these tree items. Each item
should contain some data that it can give to the model.
Note that each tree always has one root item.
Even if you have multiple top level items, they are all grouped under one
root. The data for the root item can be None but it is advised to use
a ListItemData so you can provide horizontal headers.
TreeItems should always belong to only one model.
Once a new TreeModel gets initialized all TreeItems will share the same model.
When you add a new Item or delete one, the model gets automatically updated.
You do not need to call TreeModel insertRow or removeRow. Just use add_child, remove_child
or create a new TreeItem and provide a parent item to the constructor.
"""
def __init__(self, data, parent=None):
"""Initialize a new TreeItem that holds some data and might be parented under parent
The child count will be zero. Will automatically set the parent and update the model
if the parent is not None.
:param data: the data item. if the tree item is the root,
the data will be used for horizontal headers!
It is recommended to use :class:`ListItemData` in that case.
:type data: :class:`ItemData`
:param parent: the parent treeitem
:type parent: :class:`TreeItem`
:raises: None
"""
self._model = None
self._data = data
self._parent = parent
self.childItems = []
if self._parent is not None:
self._parent.add_child(self)
def get_model(self, ):
"""Return the model the item belongs to
:returns: the model the item belongs to or None if it belongs to none
:rtype: :class:`TreeModel` | None
:raises: None
"""
return self._model
def set_model(self, model):
"""Set the model the item belongs to
A TreeItem can only belong to one model.
:param model: the model the item belongs to
        :type model: :class:`TreeModel`
:returns: None
:rtype: None
:raises: None
"""
self._model = model
for c in self.childItems:
c.set_model(model)
def add_child(self, child):
"""Add child to children of this TreeItem
:param child: the child TreeItem
:type child: :class:`TreeItem`
:returns: None
:rtype: None
:raises: None
"""
child.set_model(self._model)
if self._model:
row = len(self.childItems)
parentindex = self._model.index_of_item(self)
self._model.insertRow(row, child, parentindex)
else:
self.childItems.append(child)
def remove_child(self, child):
"""Remove the child from this TreeItem
:param child: the child TreeItem
:type child: :class:`TreeItem`
:returns: None
:rtype: None
:raises: ValueError
"""
child.set_model(None)
if self._model:
row = self.childItems.index(child)
parentindex = self._model.index_of_item(self)
self._model.removeRow(row, parentindex)
else:
self.childItems.remove(child)
def child(self, row):
"""Return the child at the specified row
:param row: the row number
:type row: int
:returns: the child
:rtype: :class:`TreeItem`
:raises: IndexError
"""
return self.childItems[row]
def child_count(self, ):
"""Return the number of children
        :returns: child count
:rtype: int
:raises: None
"""
return len(self.childItems)
def row(self, ):
"""Return the index of this tree item in the parent rows
:returns: the row of this TreeItem in the parent
:rtype: int
:raises: None
"""
if self._parent is None:
return 0
return self._parent.childItems.index(self)
def column_count(self, ):
"""Return the number of columns that the children have
If there are no children, return the column count of its own data.
:returns: the column count of the children data
:rtype: int
:raises: None
"""
if self.child_count():
return self.childItems[0]._data.column_count()
else:
return self._data.column_count() if self._data else 0
def data(self, column, role):
"""Return the data for the column and role
:param column: the data column
:type column: int
:param role: the data role
:type role: QtCore.Qt.ItemDataRole
:returns: data depending on the role
:rtype:
:raises: None
"""
        if self._data is not None and (column >= 0 and column < self._data.column_count()):
return self._data.data(column, role)
def set_data(self, column, value, role):
"""Set the data of column to value
:param column: the column to set
:type column: int
:param value: the value to set
:param role: the role, usually EditRole
:type role: :class:`QtCore.Qt.ItemDataRole`
:returns: True, if data successfully changed
:rtype: :class:`bool`
:raises: None
"""
if not self._data or column >= self._data.column_count():
return False
return self._data.set_data(column, value, role)
def parent(self, ):
"""Return the parent tree item
:returns: the parent or None if there is no parent
:rtype: :class:`TreeItem`
:raises: None
"""
return self._parent
def set_parent(self, parent):
"""Set the parent of the treeitem
:param parent: parent treeitem
:type parent: :class:`TreeItem` | None
:returns: None
:rtype: None
:raises: None
"""
if self._parent == parent:
return
if self._parent:
self._parent.remove_child(self)
self._parent = parent
if parent:
parent.add_child(self)
def itemdata(self, ):
"""Return the internal :class:`ItemData`
:returns: the internal ItemData
:rtype: :class:`ItemData`
:raises: None
"""
return self._data
def internal_data(self, ):
"""Return the internal data of the item data
E.g. a ListItemData could return the list it uses, a
ProjectItemData could return the Project etc.
:returns: the data the itemdata uses as information
:rtype: None|arbitrary data
:raises: None
"""
return self._data.internal_data()
def flags(self, index):
"""Return the flags for the item
:param index: the index to query
:type index: :class:`QtCore.QModelIndex`
:returns: the flags
:rtype: QtCore.Qt.ItemFlags
:raises: None
"""
return self._data.flags(index.column())
class TreeModel(QtCore.QAbstractItemModel):
"""A tree model that uses the :class:`TreeItem` to represent a general tree.
The model uses :class:`TreeItem` instances in an hierarchy to build a tree.
Each tree item represents a row. The tree items can hold arbitrary :class:`ItemData`
instances.
The model will get automatically updated, when the hierarchy of the tree items changes.
You rarely have to use model methods for that. Just use methods of the tree items.
All models need at least a root. The root is responsible for the headers.
So the :class:`ItemData` of the root should have a columns for each header and return
a string for them when queried with :data:`QtCore.Qt.DisplayRole`. Only horizontal
headers are supported at the moment. Vertical headers get numbers.
"""
def __init__(self, root, parent=None):
"""Initialize a new tree model with the given root treeitem
:param root: the root tree item. The root tree item is responsible for the headers.
A :class:`ListItemData` with the headers is suitable as data for the item.
:type root: :class:`TreeItem`
:param parent: the parent for the model
:type parent: :class:`QtCore.QObject`
:raises: None
"""
super(TreeModel, self).__init__(parent)
self._root = root
self._root.set_model(self)
def index(self, row, column, parent=None):
"""Return the index of the item in the model specified by the given row,
column and parent index.
:param row: the row of the item
:type row: int
:param column: the column for the item
:type column: int
:param parent: the parent index
:type parent: :class:`QtCore.QModelIndex`:
:returns: the index of the item
:rtype: :class:`QtCore.QModelIndex`
:raises: None
"""
if parent is None:
parent = QtCore.QModelIndex()
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if parent.isValid():
parentItem = parent.internalPointer()
else:
parentItem = self._root
try:
childItem = parentItem.child(row)
return self.createIndex(row, column, childItem)
except IndexError:
return QtCore.QModelIndex()
def parent(self, index):
"""Return the parent of the model item with the given index.
If the item has no parent, return an invalid QModelIndex.
:param index: the index that you want to know the parent of
:type index: :class:`QtCore.QModelIndex`
:returns: parent index
:rtype: :class:`QtCore.QModelIndex`
:raises: None
"""
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem is self._root:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
"""Return the number of rows under the given parent.
        When the parent is valid, return the number
        of children of the parent.
:param parent: the parent index
:type parent: :class:`QtCore.QModelIndex`:
:returns: the row count
:rtype: int
:raises: None
"""
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self._root
else:
parentItem = parent.internalPointer()
return parentItem.child_count()
def columnCount(self, parent):
"""Return the number of columns for the children of the given parent.
:param parent: the parent index
:type parent: :class:`QtCore.QModelIndex`:
:returns: the column count
:rtype: int
:raises: None
"""
if parent.isValid():
return parent.internalPointer().column_count()
else:
return self._root.column_count()
def data(self, index, role=QtCore.Qt.DisplayRole):
"""Return the data stored under the given role for the item referred to by the index.
:param index: the index
:type index: :class:`QtCore.QModelIndex`
:param role: the data role
:type role: QtCore.Qt.ItemDataRole
:returns: some data depending on the role
:raises: None
"""
if not index.isValid():
return
item = index.internalPointer()
return item.data(index.column(), role)
def setData(self, index, value, role=QtCore.Qt.EditRole):
"""Set the data of the given index to value
:param index: the index to set
:type index: :class:`QtCore.QModelIndex`
:param value: the value to set
:param role: the role, usually edit role
:type role: :data:`QtCore.Qt.ItemDataRole`
        :returns: True, if successful, False if unsuccessful
:rtype: :class:`bool`
:raises: None
"""
if not index.isValid():
return False
item = index.internalPointer()
r = item.set_data(index.column(), value, role)
if r:
self.dataChanged.emit(index, index)
return r
def headerData(self, section, orientation, role):
"""Return the header data
Will call :meth:`TreeItem.data` of the root :class:`TreeItem` with the
given section (column) and role for horizontal orientations.
Vertical orientations are numbered.
:param section: the section in the header view
:type section: int
:param orientation: vertical or horizontal orientation
:type orientation: :data:`QtCore.Qt.Vertical` | :data:`QtCore.Qt.Horizontal`
:param role: the data role.
:type role: :data:`QtCore.Qt.ItemDataRole`
:returns: data for the header
:raises: None
"""
if orientation == QtCore.Qt.Horizontal:
d = self._root.data(section, role)
if d is None and role == QtCore.Qt.DisplayRole:
return str(section+1)
return d
if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
return str(section+1)
def insertRow(self, row, item, parent):
"""Insert a single item before the given row in the child items of the parent specified.
:param row: the index where the rows get inserted
:type row: int
        :param item: the item to insert. When creating the item, make sure its
                     parent is None. If not, it will defeat the purpose of this function.
:type item: :class:`TreeItem`
:param parent: the parent
:type parent: :class:`QtCore.QModelIndex`
:returns: Returns true if the row is inserted; otherwise returns false.
:rtype: bool
:raises: None
"""
item.set_model(self)
if parent.isValid():
parentitem = parent.internalPointer()
else:
parentitem = self._root
self.beginInsertRows(parent, row, row)
item._parent = parentitem
if parentitem:
parentitem.childItems.insert(row, item)
self.endInsertRows()
return True
def removeRow(self, row, parent):
"""Remove row from parent
:param row: the row index
:type row: int
:param parent: the parent index
:type parent: :class:`QtCore.QModelIndex`
        :returns: Returns true if the row is removed; otherwise returns false.
:rtype: bool
:raises: None
"""
if parent.isValid():
parentitem = parent.internalPointer()
else:
parentitem = self._root
self.beginRemoveRows(parent, row, row)
item = parentitem.childItems[row]
item.set_model(None)
item._parent = None
del parentitem.childItems[row]
self.endRemoveRows()
return True
@property
def root(self, ):
"""Return the root tree item
:returns: the root item
:rtype: :class:`TreeItem`
:raises: None
"""
return self._root
def flags(self, index):
"""Return the flags for the given index
This will call :meth:`TreeItem.flags` for valid ones.
:param index: the index to query
:type index: :class:`QtCore.QModelIndex`
        :returns: the flags for the given index
        :rtype: QtCore.Qt.ItemFlags
:raises: None
"""
if index.isValid():
item = index.internalPointer()
return item.flags(index)
else:
            return super(TreeModel, self).flags(index)
def index_of_item(self, item):
"""Get the index for the given TreeItem
:param item: the treeitem to query
:type item: :class:`TreeItem`
:returns: the index of the item
:rtype: :class:`QtCore.QModelIndex`
:raises: ValueError
"""
# root has an invalid index
if item == self._root:
return QtCore.QModelIndex()
# find all parents to get their index
parents = [item]
i = item
while True:
parent = i.parent()
# break if parent is root because we got all parents we need
if parent == self._root:
break
if parent is None:
# No new parent but last parent wasn't root!
# This means that the item was not in the model!
return QtCore.QModelIndex()
# a new parent was found and we are still not at root
# search further until we get to root
i = parent
parents.append(parent)
        # walk from the topmost parent down to the item, building each index
index = QtCore.QModelIndex()
for treeitem in reversed(parents):
parent = treeitem.parent()
row = parent.childItems.index(treeitem)
index = self.index(row, 0, index)
return index
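# Minimal usage sketch (an illustration, not part of the library API): build a
# root item whose ListItemData provides the horizontal headers, add two child
# rows, then wrap everything in a TreeModel.
if __name__ == '__main__':  # pragma: no cover
    rootdata = ListItemData(['Name', 'Value'])
    root = TreeItem(rootdata)
    TreeItem(ListItemData(['foo', '1']), parent=root)
    TreeItem(ListItemData(['bar', '2']), parent=root)
    model = TreeModel(root)
    # the root supplies the headers; the two children are the top-level rows
    print(model.rowCount(QtCore.QModelIndex()))  # expected: 2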
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental library that exposes XLA operations directly in TensorFlow.
It is sometimes useful to be able to build HLO programs directly from
TensorFlow. This file provides TensorFlow operators that mirror the semantics of
HLO operators as closely as possible.
Note: Most of the operators defined in this module are used by the jax2tf
converter (see go/jax2tf for details) and are used in SavedModel produced
by jax2tf. Hence, we need to maintain backwards compatibility for these
operators. Please reach out to the JAX team if you want to make changes.
"""
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops.numpy_ops import np_utils
# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing
# ops include:
# infeed/outfeed (available via tf.contrib.tpu)
# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu)
# conditional
# gather/scatter
# collapse
# This file reuses builtin names (following XLA's names, so we can call things
# like xla.max), so we capture the builtin versions here.
# pylint: disable=redefined-builtin
_max = max
_min = min
_slice = slice # pylint: disable=invalid-name
constant = constant_op.constant
# Unary operators.
# For most arithmetic operators there is a TensorFlow operator
# that exactly corresponds to each XLA operator. Rather than defining
# XLA-specific variants, we reuse the corresponding TensorFlow operator.
# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1
# wrap every HLO operator, because that would allow us to be confident that the
# semantics match.
def _unary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def unary_op_wrapper(x, name=None):
return fn(x, name=name)
return unary_op_wrapper
abs = _unary_op(math_ops.abs)
# TODO(phawkins): implement clz.
conj = _unary_op(math_ops.conj)
cos = _unary_op(math_ops.cos)
ceil = _unary_op(math_ops.ceil)
digamma = _unary_op(math_ops.digamma)
erf = _unary_op(math_ops.erf)
erfc = _unary_op(math_ops.erfc)
erfinv = _unary_op(math_ops.erfinv)
ndtri = _unary_op(math_ops.ndtri)
exp = _unary_op(math_ops.exp)
expm1 = _unary_op(math_ops.expm1)
floor = _unary_op(math_ops.floor)
imag = _unary_op(math_ops.imag)
is_finite = _unary_op(math_ops.is_finite)
lgamma = _unary_op(math_ops.lgamma)
log = _unary_op(math_ops.log)
log1p = _unary_op(math_ops.log1p)
logical_not = _unary_op(math_ops.logical_not)
neg = _unary_op(math_ops.neg)
real = _unary_op(math_ops.real)
# TODO(phawkins): unlike xla::Round, this rounds to even instead of zero for
# numbers halfway between two integers.
round = _unary_op(math_ops.round)
sin = _unary_op(math_ops.sin)
sign = _unary_op(math_ops.sign)
tanh = _unary_op(math_ops.tanh)
# Bessel
bessel_i0e = _unary_op(special_math_ops.bessel_i0e)
bessel_i1e = _unary_op(special_math_ops.bessel_i1e)
# Binary operators
# The main difference between TensorFlow and XLA binary ops is the broadcasting
# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA
# requires an explicit specification of which dimensions to broadcast if the
# arguments have different ranks.
def _broadcasting_binary_op(fn):
"""Wraps a binary Tensorflow operator and performs XLA-style broadcasting."""
def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):
"""Inner wrapper function."""
broadcast_dims = broadcast_dims or []
broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)
# Rather than relying on having static shape information in the TensorFlow
# graph, we use an XlaBroadcastHelper op that can compute the correct shapes
# at JIT compilation time.
x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)
return fn(x, y, name=name)
return broadcasting_binary_op_wrapper
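# For example (an illustrative sketch): with x of shape [3] and y of shape
# [2, 3], plain TensorFlow would broadcast implicitly, but the wrappers below
# require the mapping to be spelled out:
#
#   add(x, y, broadcast_dims=[1])
#
# maps x's single dimension onto dimension 1 of y before the addition,
# mirroring XLA's explicit broadcast_dimensions attribute.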
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
dtypes.int8: dtypes.uint8,
dtypes.int16: dtypes.uint16,
dtypes.int32: dtypes.uint32,
dtypes.int64: dtypes.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {
dtypes.uint8: dtypes.int8,
dtypes.uint16: dtypes.int16,
dtypes.uint32: dtypes.int32,
dtypes.uint64: dtypes.int64,
}
def _shift_right_logical_helper(x, y, name=None):
"""Performs an integer right logical shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
signed = dtype in _SIGNED_TO_UNSIGNED_TABLE
if signed:
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]
x = math_ops.cast(x, unsigned_dtype)
y = math_ops.cast(y, unsigned_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if signed:
output = math_ops.cast(output, dtype)
return output
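# Worked example: for int8 inputs, logically shifting -1 (bits 0xFF) right by
# one first casts to uint8 (255), shifts to 127, and casts back to int8, so the
# result is 127 with zero-fill rather than the -1 an arithmetic shift produces.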
def _shift_right_arithmetic_helper(x, y, name=None):
"""Performs an integer right arithmetic shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE
if unsigned:
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]
x = math_ops.cast(x, signed_dtype)
y = math_ops.cast(y, signed_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if unsigned:
output = math_ops.cast(output, dtype)
return output
add = _broadcasting_binary_op(math_ops.add)
sub = _broadcasting_binary_op(math_ops.sub)
mul = _broadcasting_binary_op(math_ops.mul)
div = _broadcasting_binary_op(math_ops.div)
rem = _broadcasting_binary_op(gen_math_ops.mod)
max = _broadcasting_binary_op(math_ops.maximum)
min = _broadcasting_binary_op(math_ops.minimum)
atan2 = _broadcasting_binary_op(math_ops.atan2)
complex = _broadcasting_binary_op(math_ops.complex)
logical_and = _broadcasting_binary_op(math_ops.logical_and)
logical_or = _broadcasting_binary_op(math_ops.logical_or)
logical_xor = _broadcasting_binary_op(math_ops.logical_xor)
eq = _broadcasting_binary_op(math_ops.equal)
ne = _broadcasting_binary_op(math_ops.not_equal)
ge = _broadcasting_binary_op(math_ops.greater_equal)
gt = _broadcasting_binary_op(math_ops.greater)
le = _broadcasting_binary_op(math_ops.less_equal)
lt = _broadcasting_binary_op(math_ops.less)
pow = _broadcasting_binary_op(math_ops.pow)
shift_left = _broadcasting_binary_op(bitwise_ops.left_shift)
shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper)
shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper)
igamma = _broadcasting_binary_op(math_ops.igamma)
igamma_grad_a = _broadcasting_binary_op(gen_math_ops.igamma_grad_a)
random_gamma_grad = _broadcasting_binary_op(gen_random_ops.random_gamma_grad)
igammac = _broadcasting_binary_op(math_ops.igammac)
polygamma = _broadcasting_binary_op(math_ops.polygamma)
zeta = _broadcasting_binary_op(math_ops.zeta)
def _binary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def binary_op_wrapper(x, y, name=None):
return fn(x, y, name=name)
return binary_op_wrapper
transpose = _binary_op(array_ops.transpose)
rev = _binary_op(array_ops.reverse)
bitcast_convert_type = array_ops.bitcast
def broadcast(x, dims, name=None):
x = ops.convert_to_tensor(x)
shape = array_ops.concat([constant_op.constant(dims),
array_ops.shape(x)],
axis=0)
return array_ops.broadcast_to(x, shape, name=name)
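# Example (follows from the implementation above): broadcasting a tensor of
# shape [3] with dims=[2] yields shape [2, 3]; the new dimensions are prepended
# and the input is tiled into them.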
def clamp(a, x, b, name=None):
return min(max(a, x, name=name), b, name=name)
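# Note: clamp(lo, x, hi) clamps `x` elementwise into [lo, hi]; for instance
# clamp(0., t, 1.) saturates `t` to the unit interval (illustrative).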
concatenate = array_ops.concat
def conv(lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dimension_numbers,
feature_group_count=1,
precision_config=None,
preferred_element_type=None,
name=None,
use_v2=False,
batch_group_count=1):
"""Wraps the XLA ConvGeneralDilated operator.
ConvGeneralDilated is the most general form of XLA convolution and is
documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
Args:
lhs: the input tensor
rhs: the kernel tensor
window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimension
lhs_dilation: dilation to apply between input elements
rhs_dilation: dilation to apply between kernel elements
dimension_numbers: a `ConvolutionDimensionNumbers` proto.
feature_group_count: number of feature groups for grouped convolution.
precision_config: a `xla.PrecisionConfig` proto.
preferred_element_type: the result `dtype`.
name: an optional name for the operator.
use_v2: an optional request to use the XlaConvV2 op even if not necessary.
batch_group_count: number of batch groups or grouped filters.
Returns:
A tensor representing the output of the convolution.
"""
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
needs_v2 = (
preferred_element_type or (lhs.dtype != rhs.dtype) or
batch_group_count > 1)
if preferred_element_type is None:
preferred_element_type = np_utils.result_type(lhs.dtype, rhs.dtype)
if needs_v2 or use_v2:
return gen_xla_ops.xla_conv_v2(
lhs,
rhs,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
preferred_element_type=preferred_element_type,
name=name)
return gen_xla_ops.xla_conv(
lhs,
rhs,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
feature_group_count=feature_group_count,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
convert_element_type = math_ops.cast
def dot(lhs, rhs, name=None):
return math_ops.tensordot(lhs, rhs, axes=1, name=name)
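# Note: with axes=1, `dot` contracts the last dimension of `lhs` with the first
# dimension of `rhs`; e.g. a [2, 3] operand dotted with a [3, 4] operand yields
# a [2, 4] result (illustrative).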
def dot_general(lhs,
rhs,
dimension_numbers,
precision_config=None,
preferred_element_type=None,
name=None,
use_v2=False):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
needs_v2 = preferred_element_type or (lhs.dtype != rhs.dtype)
if preferred_element_type is None:
preferred_element_type = np_utils.result_type(lhs.dtype, rhs.dtype)
if needs_v2 or use_v2:
return gen_xla_ops.xla_dot_v2(
lhs,
rhs,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
preferred_element_type=preferred_element_type,
name=name)
return gen_xla_ops.xla_dot(
lhs,
rhs,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
def self_adjoint_eig(a, lower, max_iter, epsilon):
return gen_xla_ops.xla_self_adjoint_eig(a, lower, max_iter, epsilon)
def svd(a, max_iter, epsilon, precision_config=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_svd(a, max_iter, epsilon, precision_config_proto)
dynamic_slice = gen_xla_ops.xla_dynamic_slice
dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice
einsum = gen_xla_ops.xla_einsum
# TODO(phawkins): generalize tf.pad to support interior padding, and then remove
# the XLA-specific pad operator.
pad = gen_xla_ops.xla_pad
def random_normal(mu, sigma, dims, name=None):
mu = ops.convert_to_tensor(mu)
return random_ops.random_normal(
dims, mean=mu, stddev=sigma, dtype=mu.dtype, name=name)
def random_uniform(minval, maxval, dims, name=None):
minval = ops.convert_to_tensor(minval)
return random_ops.random_uniform(
dims, minval, maxval, dtype=minval.dtype, name=name)
def rng_bit_generator(algorithm, initial_state, shape, dtype):
"""Stateless PRNG bit generator.
Wraps the XLA RngBitGenerator operator, documented at
https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator.
Args:
algorithm: The PRNG algorithm to use, one of
tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}.
initial_state: Initial state for the PRNG algorithm. For THREEFRY, it
should be a u64[2] and for PHILOX a u64[3].
shape: The output shape of the generated data.
dtype: The type of the tensor.
Returns:
a tuple with a new state and generated data of the given shape.
"""
alg_int = stateless_random_ops.convert_alg_to_int(algorithm)
return gen_xla_ops.xla_rng_bit_generator(alg_int, initial_state, shape,
dtype=dtype)
recv = gen_xla_ops.xla_recv
reduce = gen_xla_ops.xla_reduce
variadic_reduce = gen_xla_ops.xla_variadic_reduce_v2
ops.no_gradient("XlaVariadicReduce")
def reduce_window(operand,
init,
reducer,
window_dimensions,
window_strides=None,
base_dilations=None,
window_dilations=None,
padding=None,
name=None):
"""Wraps the XLA ReduceWindow operator.
ReduceWindow is documented at
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
Args:
operand: the input tensor
init: a scalar tensor representing the initial value for the reduction
reducer: a reduction function that combines a pair of scalars.
window_dimensions: shape of the window, as a list of integers
    window_strides: inter-window strides, as a list of integers. Optional; if
      omitted, defaults to strides of 1.
    base_dilations: dilation to apply to the operand in each dimension, as a
      list of integers. Optional; if omitted, defaults to no dilation.
    window_dilations: dilation to apply between window elements in each
      dimension, as a list of integers. Optional; if omitted, defaults to no
      dilation.
    padding: padding to apply to 'operand'. List of (low, high) pairs of
      integers that specify the padding to apply before and after each
      dimension. Optional; if omitted, defaults to no padding.
name: the operator name, or None.
Returns:
A tensor that represents the output of the reduce_window operator.
"""
window_strides = window_strides or [1] * len(window_dimensions)
base_dilations = base_dilations or [1] * len(window_dimensions)
window_dilations = window_dilations or [1] * len(window_dimensions)
padding = padding or [(0, 0)] * len(window_dimensions)
return gen_xla_ops.xla_reduce_window(
input=operand,
init_value=init,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=base_dilations,
window_dilations=window_dilations,
padding=padding,
computation=reducer,
name=name)
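# Illustrative sketch (assumptions, not from the original docs): a 2x2
# max-pooling over a 4x4 operand could be phrased roughly as
#   reduce_window(x, init=constant(float('-inf'), x.dtype),
#                 reducer=max_computation,
#                 window_dimensions=[2, 2], window_strides=[2, 2])
# where `max_computation` is a hypothetical scalar-maximum reducer supplied by
# the caller in whatever form gen_xla_ops.xla_reduce_window expects.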
replica_id = gen_xla_ops.xla_replica_id
# Sets a static bound for the given input value as a hint to the XLA compiler
# and returns the same value.
# Usage:
# def f(t, p):
# p = xla.set_bound(p, 3) # Tells xla the constraint that p <= 3.
# return t[:p] # xla knows the bound of the slice is 3.
set_bound = gen_xla_ops.xla_set_bound
# Make a static dimension into an XLA bounded dynamic dimension. The current
# static dimension size will become the bound and the second operand becomes the
# dynamic size of the dimension.
#
# This should mostly be used for testing.
#
# def f():
# array = tf.convert_to_tensor([[1, 2, 3, 4, 5]])
# # Tells xla the valid size of the array is 3.
# dim = 0
# p = xla_set_dynamic_dimension_size(array, dim, 3)
# assert(reduce_sum(p) == 6) # xla knows only the first 3 elements are valid.
set_dynamic_dimension_size = gen_xla_ops.xla_set_dynamic_dimension_size
# Inverse of xla_set_dynamic_dimension_size. Make an xla bounded dynamic
# dimension into a static dimension. The bound of the size of dimension
# `dim_index` becomes the static dimension size.
remove_dynamic_dimension_size = gen_xla_ops.xla_remove_dynamic_dimension_size
def reshape(x, new_sizes, dimensions=None, name=None):
if dimensions is not None:
x = array_ops.transpose(x, dimensions)
x = array_ops.reshape(x, new_sizes, name=name)
return x
def select(condition, x, y, name=None):
return array_ops.where(condition, x, y, name)
select_and_scatter = gen_xla_ops.xla_select_and_scatter
send = gen_xla_ops.xla_send
def slice(x, start_dims, limit_dims, strides):
spec = [
_slice(start, limit, stride)
for (start, limit, stride) in zip(start_dims, limit_dims, strides)
]
return x[tuple(spec)]
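# Example (follows directly from the implementation): slice(x, [0, 1], [2, 3],
# [1, 1]) is equivalent to x[0:2, 1:3].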
sharding = gen_xla_ops.xla_sharding
@ops.RegisterGradient("XlaSharding")
def _sharding_grad(op, grad):
"""Gradient for XlaSharding op."""
sharding_attr = op.get_attr("sharding")
grad_sharding = gen_xla_ops.xla_sharding(
grad,
sharding=sharding_attr,
unspecified_dims=op.get_attr("unspecified_dims"))
# pylint: disable=protected-access
grad_sharding.op._set_attr("_XlaSharding",
attr_value_pb2.AttrValue(s=sharding_attr))
return [grad_sharding]
spmd_full_to_shard_shape = gen_xla_ops.xla_spmd_full_to_shard_shape
spmd_shard_to_full_shape = gen_xla_ops.xla_spmd_shard_to_full_shape
@ops.RegisterGradient("XlaSpmdFullToShardShape")
def _spmd_full_to_shard_shape_grad(op, grad):
s2f = gen_xla_ops.xla_spmd_shard_to_full_shape(
grad,
manual_sharding=op.get_attr("manual_sharding"),
full_shape=op.inputs[0].shape.as_list(),
dim=op.get_attr("dim"),
unspecified_dims=op.get_attr("unspecified_dims"))
return [s2f]
@ops.RegisterGradient("XlaSpmdShardToFullShape")
def _spmd_shard_to_full_shape_grad(op, grad):
f2s = gen_xla_ops.xla_spmd_full_to_shard_shape(
grad,
manual_sharding=op.get_attr("manual_sharding"),
dim=op.get_attr("dim"),
unspecified_dims=op.get_attr("unspecified_dims"))
return [f2s]
sort = gen_xla_ops.xla_sort
key_value_sort = gen_xla_ops.xla_key_value_sort
variadic_sort = gen_xla_ops.xla_variadic_sort
while_loop = gen_xla_ops.xla_while
dequantize = gen_xla_ops.xla_dequantize
custom_call = gen_xla_ops.xla_custom_call
def gather(operand, start_indices, dimension_numbers, slice_sizes,
indices_are_sorted=False, name=None):
return gen_xla_ops.xla_gather(
operand,
start_indices,
slice_sizes=slice_sizes,
dimension_numbers=dimension_numbers.SerializeToString(),
indices_are_sorted=indices_are_sorted,
name=name)
def scatter(operand, scatter_indices, updates, update_computation,
dimension_numbers, indices_are_sorted=False, name=None):
return gen_xla_ops.xla_scatter(
operand,
scatter_indices,
updates,
update_computation=update_computation,
dimension_numbers=dimension_numbers.SerializeToString(),
indices_are_sorted=indices_are_sorted,
name=name)
def optimization_barrier(*args):
return gen_xla_ops.xla_optimization_barrier(args)
|
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is more than the
            # number of neighbors, the first NN will not be the sample
            # itself, but a duplicate.
            # In that case, mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
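        # For example, with n_samples1 == 3 and n_neighbors == 2 the indptr is
        # [0, 2, 4, 6], i.e. row i of the CSR matrix owns entries [2*i, 2*i + 2).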
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
        radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
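        # The loop below label-encodes each output column with np.unique; e.g.
        # a column ['b', 'a', 'b'] becomes classes_ ['a', 'b'] and encoded
        # targets [1, 0, 1].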
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009 Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import with_statement
import codecs
import datetime
import os
import re
from stat import *
import sys
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from jinja2.utils import open_if_exists
try:
import markdown
except ImportError:
markdown = None
try:
from textile import textile
except ImportError:
textile = None
import PyRSS2Gen
import conf
# could be better
re_date = re.compile('^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])-(.*)$')
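# e.g. "2009-03-07-hello-world" (the filename without its extension) matches
# with groups ('2009', '03', '07', 'hello-world'); the first two separators may
# be any single non-digit character or omitted, while the slug must follow a
# literal '-'.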
template_env = Environment(loader=FileSystemLoader(conf.TEMPLATES_PATH, encoding="utf-8"))
template_env.charset = 'utf-8'
def render_template(template_name, _stream=False, **kwargs):
""" render jinja template """
tmpl = template_env.get_template(template_name)
context = kwargs
if _stream:
return tmpl.stream(context)
return tmpl.render(context)
def relative_url(value):
site_url = conf.SITE_URL
if site_url.endswith('/'):
site_url = site_url[:-1]
return value.split(site_url)[1]
template_env.filters['rel_url'] = relative_url
def source_newer(source, target):
if len(sys.argv) > 1 and sys.argv[1] == "force":
return True
if not os.path.exists(target):
return True
else:
smtime = os.stat(source)[ST_MTIME]
tmtime = os.stat(target)[ST_MTIME]
return smtime > tmtime
def convert_markdown(value):
md = markdown.Markdown(output_format="html")
md.set_output_format('html')
return md.convert(value)
def convert_textile(value):
return textile(value, validate=False, head_offset=False,
sanitize=False, encoding='utf-8', output='utf-8').decode('utf-8')
def rfc3339_date(date):
# iso8601
if date.tzinfo:
return date.strftime('%Y-%m-%dT%H:%M:%S%z')
else:
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
class Site(object):
def __init__(self):
self.sitemap = []
self.feed = []
site_url = conf.SITE_URL
if site_url.endswith('/'):
site_url = site_url[:-1]
self.site_url = site_url
def process_directory(self, current_dir, files, target_path):
files = [f for f in files if os.path.splitext(f)[1] in conf.EXTENSIONS]
blog = None
for f in files:
print "process %s" % f
page = Page(self, f, current_dir, target_path)
            if page.is_blog() and (f == "index.txt" or f == "archives.txt"):
continue
elif page.is_blog():
if blog is None:
blog = Blog(self, current_dir, target_path)
blog.append(page)
continue
if not source_newer(page.finput, page.foutput) and f != "index.txt":
continue
print "write %s" % page.foutput
try:
f = codecs.open(page.foutput, 'w', 'utf-8')
try:
f.write(page.render())
finally:
f.close()
except (IOError, OSError), err:
raise
self.sitemap.append(page)
if blog is not None:
blog.render()
def generate_rss(self):
rss = PyRSS2Gen.RSS2(
title = conf.SITE_NAME,
link = conf.SITE_URL,
description = conf.SITE_DESCRIPTION,
lastBuildDate = datetime.datetime.utcnow(),
items = [])
for i, e in enumerate(self.feed):
item = PyRSS2Gen.RSSItem(
title = e['title'],
link = e['link'],
description = e['description'],
guid = PyRSS2Gen.Guid(e['link']),
pubDate = datetime.datetime.fromtimestamp(e['pubDate']))
rss.items.append(item)
if i == 15: break
rss.write_xml(open(os.path.join(conf.OUTPUT_PATH, "feed.xml"), "w"))
def generate_sitemap(self):
xml = u'<?xml version="1.0" encoding="UTF-8"?>'
xml += u'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
for page in self.sitemap:
xml += u'<url>'
xml += u'<loc>%s</loc>' % page.url
xml += u'<lastmod>%s</lastmod>' % rfc3339_date(page.headers['published'])
xml += u'<changefreq>daily</changefreq>'
xml += u'<priority>0.5</priority>'
xml += u'</url>'
xml += u'</urlset>'
with codecs.open(os.path.join(conf.OUTPUT_PATH, "sitemaps.xml"), "w", "utf-8") as f:
f.write(xml)
def render(self):
for root, dirs, files in os.walk(conf.INPUT_PATH):
target_path = root.replace(conf.INPUT_PATH, conf.OUTPUT_PATH)
if not os.path.isdir(target_path):
os.makedirs(target_path)
self.process_directory(root, files, target_path)
if self.feed:
self.feed.sort(lambda a, b: a['pubDate'] - b['pubDate'], reverse=True)
self.generate_rss()
if self.sitemap:
self.generate_sitemap()
class Blog(object):
def __init__(self, site, current_dir, target_path):
self.site = site
self.current_dir = current_dir
self.target_path = target_path
self.pages = []
def append(self, page):
paras = [p for p in page.body.split("\n\n") if p]
if paras:
description = "\n\n".join(paras[0:2])
content_type = page.headers.get('content_type', conf.CONTENT_TYPE)
if content_type == "markdown":
description = convert_markdown(description)
elif content_type == "textile":
description = convert_textile(description)
m = re_date.match(os.path.splitext(page.filename)[0])
if m:
date = "%s-%s-%s" % (m.group(1), m.group(2), m.group(3))
else:
date = ""
page.headers['date'] = date
page.headers['description'] = description
self.pages.append(page)
def render(self):
index_page = Page(self.site, "index.txt", self.current_dir,
self.target_path)
try:
archives_page = Page(self.site, "archives.txt", self.current_dir,
self.target_path)
except IOError:
archives_page = None
if not os.path.isfile(index_page.finput):
raise IOError, "index.txt isn't found in %s" % self.current_dir
self.pages.sort(lambda a, b: a.headers['pubDate'] - b.headers['pubDate'], reverse=True)
entries = []
# first pass
for page in self.pages:
entry = {
"title": page.headers.get('title', page.filename),
"description": page.headers['description'],
"link": page.url,
"pubDate": page.headers['pubDate'],
"date": page.headers['date']
}
self.site.feed.append(entry)
entries.append(entry)
self.pages.append(index_page)
if archives_page is not None:
self.pages.append(archives_page)
# second pass : render pages
for page in self.pages:
page.headers['entries'] = entries
try:
f = codecs.open(page.foutput, 'w', 'utf-8')
try:
f.write(page.render())
finally:
f.close()
except (IOError, OSError), err:
raise
self.site.sitemap.append(page)
class Page(object):
content_types = {
'html': 'text/html',
'markdown': 'text/html',
'textile': 'text/html',
'text': 'text/plain'
}
files_ext = {
'html': 'html',
'markdown': 'html',
'textile': 'html',
'text': 'txt'
}
def __init__(self, site, filename, current_dir, target_path):
self.filename = filename
self.current_dir = current_dir
self.target_path = target_path
self.finput = os.path.join(current_dir, filename)
self.parsed = False
self.foutput = ''
self.site = site
self.headers = {}
self.body = ""
self.parse()
def get_url(self):
rel_path = self.foutput.split(conf.OUTPUT_PATH)[1]
if rel_path.startswith('/'):
rel_path = rel_path[1:]
return "/".join([self.site.site_url, rel_path])
def parse(self):
with open(self.finput, 'r') as f:
headers = {}
raw = f.read()
try:
(header_lines,body) = raw.split("\n\n", 1)
for header in header_lines.split("\n"):
(name, value) = header.split(": ", 1)
headers[name.lower()] = unicode(value.strip())
self.headers = headers
self.headers['pubDate'] = os.stat(self.finput)[ST_CTIME]
self.headers['published'] = datetime.datetime.fromtimestamp(self.headers['pubDate'])
self.body = body
content_type = self.headers.get('content_type', conf.CONTENT_TYPE)
if content_type in self.content_types.keys():
self.foutput = os.path.join(self.target_path,
"%s.%s" % (os.path.splitext(self.filename)[0], self.files_ext[content_type]))
self.url = self.get_url()
else:
raise TypeError, "Unknown content_type"
except:
raise TypeError, "Invalid page file format for %s" % self.finput
self.parsed = True
def is_blog(self):
if not 'page_type' in self.headers:
return False
return (self.headers['page_type'] == "blog")
def render(self):
if not self.parsed:
self.parse()
template = self.headers.get('template', conf.DEFAULT_TEMPLATE)
content_type = self.headers.get('content_type', conf.CONTENT_TYPE)
if content_type in self.content_types.keys():
fun = getattr(self, "render_%s" % content_type)
return fun(template)
else:
raise TypeError, "Unknown content_type"
def _render_html(self, template, body):
kwargs = {
"body": body,
"sitename": conf.SITE_NAME,
"siteurl": conf.SITE_URL,
"url": self.url
}
kwargs.update(self.headers)
return render_template(template, **kwargs)
def render_html(self, template):
return self._render_html(template, self.body)
def render_markdown(self, template):
if markdown is None:
raise TypeError, "markdown isn't suported"
body = convert_markdown(self.body)
return self._render_html(template, body)
def render_textile(self, template):
if textile is None:
raise TypeError, "textile isn't suported"
body = convert_textile(self.body)
return self._render_html(template, body)
def render_text(self, template):
return self.body
def main():
site = Site()
site.render()
if __name__ == "__main__":
main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
from nova.compute import multi_cell_list
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures
FAKE_CELLS = [objects.CellMapping(), objects.CellMapping()]
class TestInstanceList(test.NoDBTestCase):
def setUp(self):
super(TestInstanceList, self).setUp()
cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
name='cell%i' % i,
transport_url='fake:///',
database_connection='fake://')
for i in range(0, 3)]
insts = {}
for cell in cells:
insts[cell.uuid] = list([
dict(
uuid=getattr(uuids, '%s-inst%i' % (cell.name, i)),
hostname='%s-inst%i' % (cell.name, i))
for i in range(0, 3)])
self.cells = cells
self.insts = insts
self.context = nova_context.RequestContext()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
self.flags(instance_list_cells_batch_strategy='fixed', group='api')
def test_compare_simple_instance_quirks(self):
# Ensure uuid,asc is added
ctx = instance_list.InstanceSortContext(['key0'], ['asc'])
self.assertEqual(['key0', 'uuid'], ctx.sort_keys)
self.assertEqual(['asc', 'asc'], ctx.sort_dirs)
# Ensure defaults are added
ctx = instance_list.InstanceSortContext(None, None)
self.assertEqual(['created_at', 'id', 'uuid'], ctx.sort_keys)
self.assertEqual(['desc', 'desc', 'asc'], ctx.sort_dirs)
@mock.patch('nova.db.api.instance_get_all_by_filters_sort')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_get_instances_sorted(self, mock_cells, mock_inst):
mock_cells.return_value = self.cells
insts_by_cell = self.insts.values()
mock_inst.side_effect = insts_by_cell
obj, insts = instance_list.get_instances_sorted(self.context, {},
None, None, [],
['hostname'], ['asc'])
insts_one = [inst['hostname'] for inst in insts]
# Reverse the order that we get things from the cells so we can
# make sure that the result is still sorted the same way
insts_by_cell = list(reversed(list(insts_by_cell)))
mock_inst.reset_mock()
mock_inst.side_effect = insts_by_cell
obj, insts = instance_list.get_instances_sorted(self.context, {},
None, None, [],
['hostname'], ['asc'])
insts_two = [inst['hostname'] for inst in insts]
self.assertEqual(insts_one, insts_two)
@mock.patch('nova.objects.BuildRequestList.get_by_filters')
@mock.patch('nova.compute.instance_list.get_instances_sorted')
@mock.patch('nova.objects.CellMappingList.get_by_project_id')
def test_user_gets_subset_of_cells(self, mock_cm, mock_gi, mock_br):
self.flags(instance_list_per_project_cells=True, group='api')
mock_gi.return_value = instance_list.InstanceLister(None, None), []
mock_br.return_value = []
user_context = nova_context.RequestContext('fake', 'fake')
instance_list.get_instance_objects_sorted(
user_context, {}, None, None, [], None, None)
mock_gi.assert_called_once_with(user_context, {}, None, None, [],
None, None,
cell_mappings=mock_cm.return_value,
batch_size=1000,
cell_down_support=False)
@mock.patch('nova.context.CELLS', new=FAKE_CELLS)
@mock.patch('nova.context.load_cells')
@mock.patch('nova.objects.BuildRequestList.get_by_filters')
@mock.patch('nova.compute.instance_list.get_instances_sorted')
@mock.patch('nova.objects.CellMappingList.get_by_project_id')
def test_admin_gets_all_cells(self, mock_cm, mock_gi, mock_br, mock_lc):
mock_gi.return_value = instance_list.InstanceLister(None, None), []
mock_br.return_value = []
admin_context = nova_context.RequestContext('fake', 'fake',
is_admin=True)
instance_list.get_instance_objects_sorted(
admin_context, {}, None, None, [], None, None)
mock_gi.assert_called_once_with(admin_context, {}, None, None, [],
None, None,
cell_mappings=FAKE_CELLS,
batch_size=100,
cell_down_support=False)
mock_cm.assert_not_called()
mock_lc.assert_called_once_with()
@mock.patch('nova.context.CELLS', new=FAKE_CELLS)
@mock.patch('nova.context.load_cells')
@mock.patch('nova.objects.BuildRequestList.get_by_filters')
@mock.patch('nova.compute.instance_list.get_instances_sorted')
@mock.patch('nova.objects.CellMappingList.get_by_project_id')
def test_user_gets_all_cells(self, mock_cm, mock_gi, mock_br, mock_lc):
self.flags(instance_list_per_project_cells=False, group='api')
mock_gi.return_value = instance_list.InstanceLister(None, None), []
mock_br.return_value = []
user_context = nova_context.RequestContext('fake', 'fake')
instance_list.get_instance_objects_sorted(
user_context, {}, None, None, [], None, None)
mock_gi.assert_called_once_with(user_context, {}, None, None, [],
None, None,
cell_mappings=FAKE_CELLS,
batch_size=100,
cell_down_support=False)
mock_lc.assert_called_once_with()
@mock.patch('nova.context.CELLS', new=FAKE_CELLS)
@mock.patch('nova.context.load_cells')
@mock.patch('nova.objects.BuildRequestList.get_by_filters')
@mock.patch('nova.compute.instance_list.get_instances_sorted')
@mock.patch('nova.objects.CellMappingList.get_by_project_id')
def test_admin_gets_all_cells_anyway(self, mock_cm, mock_gi, mock_br,
mock_lc):
self.flags(instance_list_per_project_cells=True, group='api')
mock_gi.return_value = instance_list.InstanceLister(None, None), []
mock_br.return_value = []
admin_context = nova_context.RequestContext('fake', 'fake',
is_admin=True)
instance_list.get_instance_objects_sorted(
admin_context, {}, None, None, [], None, None)
mock_gi.assert_called_once_with(admin_context, {}, None, None, [],
None, None,
cell_mappings=FAKE_CELLS,
batch_size=100,
cell_down_support=False)
mock_cm.assert_not_called()
mock_lc.assert_called_once_with()
@mock.patch('nova.context.scatter_gather_cells')
def test_get_instances_with_down_cells(self, mock_sg):
inst_cell0 = self.insts[uuids.cell0]
# storing the uuids of the instances from the up cell
uuid_initial = [inst['uuid'] for inst in inst_cell0]
def wrap(thing):
return multi_cell_list.RecordWrapper(ctx, self.context, thing)
ctx = nova_context.RequestContext()
instances = [wrap(inst) for inst in inst_cell0]
# creating one up cell and two down cells
ret_val = {}
ret_val[uuids.cell0] = instances
ret_val[uuids.cell1] = [wrap(exception.BuildRequestNotFound(uuid='f'))]
ret_val[uuids.cell2] = [wrap(nova_context.did_not_respond_sentinel)]
mock_sg.return_value = ret_val
obj, res = instance_list.get_instances_sorted(self.context, {}, None,
None, [], None, None)
uuid_final = [inst['uuid'] for inst in res]
# return the results from the up cell, ignoring the down cell.
self.assertEqual(uuid_initial, uuid_final)
@mock.patch('nova.context.scatter_gather_cells')
def test_get_instances_by_not_skipping_down_cells(self, mock_sg):
self.flags(list_records_by_skipping_down_cells=False, group='api')
inst_cell0 = self.insts[uuids.cell0]
def wrap(thing):
return multi_cell_list.RecordWrapper(ctx, self.context, thing)
ctx = nova_context.RequestContext()
instances = [wrap(inst) for inst in inst_cell0]
# creating one up cell and two down cells
ret_val = {}
ret_val[uuids.cell0] = instances
ret_val[uuids.cell1] = [wrap(exception.BuildRequestNotFound(uuid='f'))]
ret_val[uuids.cell2] = [wrap(nova_context.did_not_respond_sentinel)]
mock_sg.return_value = ret_val
# Raises exception if a cell is down without skipping them
# as CONF.api.list_records_by_skipping_down_cells is set to False.
# This would in turn result in an API 500 internal error.
exp = self.assertRaises(exception.NovaException,
instance_list.get_instance_objects_sorted, self.context, {}, None,
None, [], None, None)
self.assertIn('configuration indicates', str(exp))
@mock.patch('nova.context.scatter_gather_cells')
def test_get_instances_with_cell_down_support(self, mock_sg):
self.flags(list_records_by_skipping_down_cells=False, group='api')
inst_cell0 = self.insts[uuids.cell0]
# storing the uuids of the instances from the up cell
uuid_initial = [inst['uuid'] for inst in inst_cell0]
def wrap(thing):
return multi_cell_list.RecordWrapper(ctx, self.context, thing)
ctx = nova_context.RequestContext()
instances = [wrap(inst) for inst in inst_cell0]
# creating one up cell and two down cells
ret_val = {}
ret_val[uuids.cell0] = instances
ret_val[uuids.cell1] = [wrap(exception.BuildRequestNotFound(uuid='f'))]
ret_val[uuids.cell2] = [wrap(nova_context.did_not_respond_sentinel)]
mock_sg.return_value = ret_val
# From the new microversion (2.68) if cell_down_support is True
# then CONF.api.list_records_by_skipping_down_cells will be ignored.
# Exception will not be raised even if its False.
obj, res = instance_list.get_instances_sorted(self.context, {}, None,
None, [], None, None,
cell_down_support=True)
uuid_final = [inst['uuid'] for inst in res]
# return the results from the up cell, ignoring the down cell and
# constructing partial results later.
self.assertEqual(uuid_initial, uuid_final)
def test_batch_size_fixed(self):
fixed_size = 200
self.flags(instance_list_cells_batch_strategy='fixed', group='api')
self.flags(instance_list_cells_batch_fixed_size=fixed_size,
group='api')
# We call the batch size calculator with various arguments, including
# lists of cells which are just counted, so the cardinality is all that
# matters.
# One cell, so batch at $limit
ret = instance_list.get_instance_list_cells_batch_size(
1000, [mock.sentinel.cell1])
self.assertEqual(1000, ret)
# Two cells, so batch at $fixed_size
ret = instance_list.get_instance_list_cells_batch_size(
1000, [mock.sentinel.cell1, mock.sentinel.cell2])
self.assertEqual(fixed_size, ret)
# Four cells, so batch at $fixed_size
ret = instance_list.get_instance_list_cells_batch_size(
1000, [mock.sentinel.cell1, mock.sentinel.cell2,
mock.sentinel.cell3, mock.sentinel.cell4])
self.assertEqual(fixed_size, ret)
# Three cells, tiny limit, so batch at lower threshold
ret = instance_list.get_instance_list_cells_batch_size(
10, [mock.sentinel.cell1,
mock.sentinel.cell2,
mock.sentinel.cell3])
self.assertEqual(100, ret)
# Three cells, limit above floor, so batch at limit
ret = instance_list.get_instance_list_cells_batch_size(
110, [mock.sentinel.cell1,
mock.sentinel.cell2,
mock.sentinel.cell3])
self.assertEqual(110, ret)
def test_batch_size_distributed(self):
self.flags(instance_list_cells_batch_strategy='distributed',
group='api')
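        # The expected values below are consistent with a batch size of roughly
        # max(100, (limit / num_cells) * 1.1) when more than one cell is
        # involved (an inference from the assertions, not a statement of the
        # exact implementation).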
# One cell, so batch at $limit
ret = instance_list.get_instance_list_cells_batch_size(1000, [1])
self.assertEqual(1000, ret)
# Two cells so batch at ($limit/2)+10%
ret = instance_list.get_instance_list_cells_batch_size(1000, [1, 2])
self.assertEqual(550, ret)
# Four cells so batch at ($limit/4)+10%
ret = instance_list.get_instance_list_cells_batch_size(1000, [1, 2,
3, 4])
self.assertEqual(275, ret)
# Three cells, tiny limit, so batch at lower threshold
ret = instance_list.get_instance_list_cells_batch_size(10, [1, 2, 3])
self.assertEqual(100, ret)
# Three cells, small limit, so batch at lower threshold
ret = instance_list.get_instance_list_cells_batch_size(110, [1, 2, 3])
self.assertEqual(100, ret)
# No cells, so batch at $limit
ret = instance_list.get_instance_list_cells_batch_size(1000, [])
self.assertEqual(1000, ret)
class TestInstanceListBig(test.NoDBTestCase):
def setUp(self):
super(TestInstanceListBig, self).setUp()
cells = [objects.CellMapping(uuid=getattr(uuids, 'cell%i' % i),
name='cell%i' % i,
transport_url='fake:///',
database_connection='fake://')
for i in range(0, 3)]
insts = list([
dict(
uuid=getattr(uuids, 'inst%i' % i),
hostname='inst%i' % i)
for i in range(0, 100)])
self.cells = cells
self.insts = insts
self.context = nova_context.RequestContext()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
@mock.patch('nova.db.api.instance_get_all_by_filters_sort')
@mock.patch('nova.objects.CellMappingList.get_all')
def test_get_instances_batched(self, mock_cells, mock_inst):
mock_cells.return_value = self.cells
def fake_get_insts(ctx, filters, limit, *a, **k):
for i in range(0, limit):
yield self.insts.pop()
mock_inst.side_effect = fake_get_insts
obj, insts = instance_list.get_instances_sorted(self.context, {},
50, None, [],
['hostname'], ['desc'],
batch_size=10)
# Make sure we returned exactly how many were requested
insts = list(insts)
self.assertEqual(50, len(insts))
# Since the instances are all uniform, we should have a
# predictable number of queries to the database. 5 queries
# would get us 50 results, plus one more gets triggered by the
# sort to fill the buffer for the first cell feeder that runs
# dry.
self.assertEqual(6, mock_inst.call_count)
|
|
"""
Module for creating Three.js geometry JSON nodes.
"""
import os
from .. import constants, logger
from . import base_classes, io, api
FORMAT_VERSION = 3
class Geometry(base_classes.BaseNode):
"""Class that wraps a single mesh/geometry node."""
def __init__(self, node, parent=None):
logger.debug("Geometry().__init__(%s)", node)
# @TODO: maybe better to have `three` constants for
# strings that are specific to `three` properties
geo_type = constants.GEOMETRY.title()
if parent.options.get(constants.GEOMETRY_TYPE):
opt_type = parent.options[constants.GEOMETRY_TYPE]
if opt_type == constants.BUFFER_GEOMETRY:
geo_type = constants.BUFFER_GEOMETRY
elif opt_type != constants.GEOMETRY:
logger.error("Unknown geometry type %s", opt_type)
logger.info("Setting %s to '%s'", node, geo_type)
self._defaults[constants.TYPE] = geo_type
base_classes.BaseNode.__init__(self, node,
parent=parent,
type=geo_type)
@property
def animation_filename(self):
"""Calculate the file name for the animation file
:return: base name for the file
"""
compression = self.options.get(constants.COMPRESSION)
if compression in (None, constants.NONE):
ext = constants.JSON
elif compression == constants.MSGPACK:
ext = constants.PACK
key = ''
for key in (constants.MORPH_TARGETS, constants.ANIMATION):
if key in self.keys():
break
else:
logger.info("%s has no animation data", self.node)
return
return '%s.%s.%s' % (self.node, key, ext)
@property
def face_count(self):
"""Parse the bit masks of the `faces` array.
:rtype: int
"""
try:
faces = self[constants.FACES]
except KeyError:
logger.debug("No parsed faces found")
return 0
length = len(faces)
offset = 0
def bitset(bit, mask):
"""
:type bit: int
:type mask: int
"""
return bit & (1 << mask)
face_count = 0
masks = (constants.MASK[constants.UVS],
constants.MASK[constants.NORMALS],
constants.MASK[constants.COLORS])
while offset < length:
bit = faces[offset]
offset += 1
face_count += 1
is_quad = bitset(bit, constants.MASK[constants.QUAD])
vector = 4 if is_quad else 3
offset += vector
if bitset(bit, constants.MASK[constants.MATERIALS]):
offset += 1
for mask in masks:
if bitset(bit, mask):
offset += vector
return face_count
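# Illustrative sketch (not part of the original exporter): for a single
# triangle that carries per-vertex UVs and per-vertex normals, a format 3
# `faces` array looks roughly like
#
#   [ 40,          # bit mask: triangle + vertex-uv bit + vertex-normal bit
#     0, 1, 2,     # vertex indices
#     0, 1, 2,     # uv indices (consumed because the UVS mask bit is set)
#     0, 1, 2 ]    # normal indices (consumed because the NORMALS bit is set)
#
# so the loop above would advance the offset by 10 ints and count one face.
# The exact bit positions come from constants.MASK and may differ from this
# sketch.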
@property
def metadata(self):
"""Metadata for the current node.
:rtype: dict
"""
metadata = {
constants.GENERATOR: constants.THREE,
constants.VERSION: FORMAT_VERSION
}
if self[constants.TYPE] == constants.GEOMETRY.title():
self._geometry_metadata(metadata)
else:
self._buffer_geometry_metadata(metadata)
return metadata
def copy(self, scene=True):
"""Copy the geometry definitions to a standard dictionary.
:param scene: toggle for scene formatting
(Default value = True)
:type scene: bool
:rtype: dict
"""
logger.debug("Geometry().copy(scene=%s)", scene)
dispatch = {
True: self._scene_format,
False: self._geometry_format
}
data = dispatch[scene]()
try:
data[constants.MATERIALS] = self[constants.MATERIALS].copy()
except KeyError:
logger.debug("No materials to copy")
return data
def copy_textures(self, texture_folder=''):
"""Copy the textures to the destination directory."""
logger.debug("Geometry().copy_textures()")
if self.options.get(constants.COPY_TEXTURES):
texture_registration = self.register_textures()
if texture_registration:
logger.info("%s has registered textures", self.node)
dirname = os.path.dirname(self.scene.filepath)
full_path = os.path.join(dirname, texture_folder)
io.copy_registered_textures(
full_path, texture_registration)
def parse(self):
"""Parse the current node"""
logger.debug("Geometry().parse()")
if self[constants.TYPE] == constants.GEOMETRY.title():
logger.info("Parsing Geometry format")
self._parse_geometry()
else:
logger.info("Parsing BufferGeometry format")
self._parse_buffer_geometry()
def register_textures(self):
"""Obtain a texture registration object.
:rtype: dict
"""
logger.debug("Geometry().register_textures()")
return api.mesh.texture_registration(self.node)
def write(self, filepath=None):
"""Write the geometry definitions to disk. Uses the
destination path of the scene.
:param filepath: optional output file path
(Default value = None)
:type filepath: str
"""
logger.debug("Geometry().write(filepath=%s)", filepath)
filepath = filepath or self.scene.filepath
io.dump(filepath, self.copy(scene=False),
options=self.scene.options)
if self.options.get(constants.MAPS):
logger.info("Copying textures for %s", self.node)
self.copy_textures()
def write_animation(self, filepath):
"""Write the animation definitions to a separate file
on disk. This helps optimize the geometry file size.
:param filepath: destination path
:type filepath: str
"""
logger.debug("Geometry().write_animation(%s)", filepath)
for key in (constants.MORPH_TARGETS, constants.ANIMATION):
try:
data = self[key]
break
except KeyError:
pass
else:
logger.info("%s has no animation data", self.node)
return
filepath = os.path.join(filepath, self.animation_filename)
if filepath:
logger.info("Dumping animation data to %s", filepath)
io.dump(filepath, data, options=self.scene.options)
return filepath
else:
logger.warning("Could not determine a filepath for "
"animation data. Nothing written to disk.")
def _component_data(self):
"""Query the component data only
:rtype: dict
"""
logger.debug("Geometry()._component_data()")
if self[constants.TYPE] != constants.GEOMETRY.title():
return self[constants.ATTRIBUTES]
components = [constants.VERTICES, constants.FACES,
constants.UVS, constants.COLORS,
constants.NORMALS, constants.BONES,
constants.SKIN_WEIGHTS,
constants.SKIN_INDICES, constants.NAME,
constants.INFLUENCES_PER_VERTEX,
constants.INDEX]
data = {}
anim_components = [constants.MORPH_TARGETS, constants.ANIMATION]
if self.options.get(constants.EMBED_ANIMATION):
components.extend(anim_components)
else:
for component in anim_components:
try:
self[component]
except KeyError:
pass
else:
data[component] = os.path.basename(
self.animation_filename)
break
else:
logger.info("No animation data found for %s", self.node)
option_extra_vgroups = self.options.get(constants.EXTRA_VGROUPS)
for name, index in api.mesh.extra_vertex_groups(self.node,
option_extra_vgroups):
components.append(name)
for component in components:
try:
data[component] = self[component]
except KeyError:
logger.debug("Component %s not found", component)
return data
def _geometry_format(self):
"""Three.Geometry formatted definitions
:rtype: dict
"""
data = self._component_data()
if self[constants.TYPE] != constants.GEOMETRY.title():
data = {
constants.DATA: {constants.ATTRIBUTES: data}
}
data[constants.METADATA] = {
constants.TYPE: self[constants.TYPE]
}
data[constants.METADATA].update(self.metadata)
draw_calls = self.get(constants.DRAW_CALLS)
if draw_calls is not None:
data[constants.DRAW_CALLS] = draw_calls
return data
def _buffer_geometry_metadata(self, metadata):
"""Three.BufferGeometry metadata
:rtype: dict
"""
for key, value in self[constants.ATTRIBUTES].items():
size = value[constants.ITEM_SIZE]
array = value[constants.ARRAY]
metadata[key] = len(array)/size
def _geometry_metadata(self, metadata):
"""Three.Geometry metadata
:rtype: dict
"""
skip = (constants.TYPE, constants.FACES, constants.UUID,
constants.ANIMATION, constants.SKIN_INDICES,
constants.SKIN_WEIGHTS, constants.NAME,
constants.INFLUENCES_PER_VERTEX)
vectors = (constants.VERTICES, constants.NORMALS)
for key in self.keys():
if key in vectors:
try:
metadata[key] = int(len(self[key])/3)
except KeyError:
pass
continue
if key in skip:
continue
metadata[key] = len(self[key])
faces = self.face_count
if faces > 0:
metadata[constants.FACES] = faces
def _scene_format(self):
"""Format the output for Scene compatability
:rtype: dict
"""
data = {
constants.UUID: self[constants.UUID],
constants.TYPE: self[constants.TYPE]
}
component_data = self._component_data()
if self[constants.TYPE] == constants.GEOMETRY.title():
data[constants.DATA] = component_data
data[constants.DATA].update({
constants.METADATA: self.metadata
})
else:
geometry_data = data
if self.options.get(constants.EMBED_GEOMETRY, True):
data[constants.DATA] = geometry_data = {}
geometry_data[constants.ATTRIBUTES] = component_data
index = self.get(constants.INDEX)
if index is not None:
geometry_data[constants.INDEX] = index
draw_calls = self.get(constants.DRAW_CALLS)
if draw_calls is not None:
geometry_data[constants.DRAW_CALLS] = draw_calls
data[constants.METADATA] = self.metadata
data[constants.NAME] = self[constants.NAME]
return data
def _parse_buffer_geometry(self):
"""Parse the geometry to Three.BufferGeometry specs"""
self[constants.ATTRIBUTES] = {}
options_vertices = self.options.get(constants.VERTICES)
option_normals = self.options.get(constants.NORMALS)
option_uvs = self.options.get(constants.UVS)
option_extra_vgroups = self.options.get(constants.EXTRA_VGROUPS)
option_index_type = self.options.get(constants.INDEX_TYPE)
pos_tuple = (constants.POSITION, options_vertices,
api.mesh.buffer_position, 3)
uvs_tuple = (constants.UV, option_uvs,
api.mesh.buffer_uv, 2)
normals_tuple = (constants.NORMAL, option_normals,
api.mesh.buffer_normal, 3)
dispatch = (pos_tuple, uvs_tuple, normals_tuple)
for key, option, func, size in dispatch:
if not option:
continue
array = func(self.node) or []
if not array:
logger.warning("No array could be made for %s", key)
continue
self[constants.ATTRIBUTES][key] = {
constants.ITEM_SIZE: size,
constants.TYPE: constants.FLOAT_32,
constants.ARRAY: array
}
for name, index in api.mesh.extra_vertex_groups(self.node,
option_extra_vgroups):
logger.info("Exporting extra vertex group %s", name)
array = api.mesh.buffer_vertex_group_data(self.node, index)
if not array:
logger.warning("No array could be made for %s", name)
continue
self[constants.ATTRIBUTES][name] = {
constants.ITEM_SIZE: 1,
constants.TYPE: constants.FLOAT_32,
constants.ARRAY: array
}
if option_index_type != constants.NONE:
assert(not (self.get(constants.INDEX) or
self.get(constants.DRAW_CALLS)))
indices_per_face = 3
index_threshold = 0xffff - indices_per_face
if option_index_type == constants.UINT_32:
index_threshold = 0x7fffffff - indices_per_face
attrib_data_in, attrib_data_out, attrib_keys = [], [], []
i = 0
for key, entry in self[constants.ATTRIBUTES].items():
item_size = entry[constants.ITEM_SIZE]
attrib_keys.append(key)
attrib_data_in.append((entry[constants.ARRAY], item_size))
attrib_data_out.append(([], i, i + item_size))
i += item_size
index_data, draw_calls = [], []
indexed, flush_req, base_vertex = {}, False, 0
assert(len(attrib_data_in) > 0)
array, item_size = attrib_data_in[0]
i, n = 0, len(array) / item_size
while i < n:
vertex_data = ()
for array, item_size in attrib_data_in:
vertex_data += tuple(
array[i * item_size:(i + 1) * item_size])
vertex_index = indexed.get(vertex_data)
if vertex_index is None:
vertex_index = len(indexed)
flush_req = vertex_index >= index_threshold
indexed[vertex_data] = vertex_index
for array, i_from, i_to in attrib_data_out:
array.extend(vertex_data[i_from:i_to])
index_data.append(vertex_index)
i += 1
if i == n:
flush_req = len(draw_calls) > 0
assert(i % indices_per_face == 0)
if flush_req and i % indices_per_face == 0:
start, count = 0, len(index_data)
if draw_calls:
prev = draw_calls[-1]
start = (prev[constants.DC_START] +
prev[constants.DC_COUNT])
count -= start
draw_calls.append({
constants.DC_START: start,
constants.DC_COUNT: count,
constants.DC_INDEX: base_vertex
})
base_vertex += len(indexed)
indexed.clear()
flush_req = False
for i, key in enumerate(attrib_keys):
array = attrib_data_out[i][0]
self[constants.ATTRIBUTES][key][constants.ARRAY] = array
self[constants.INDEX] = {
constants.ITEM_SIZE: 1,
constants.TYPE: option_index_type,
constants.ARRAY: index_data
}
if (draw_calls):
logger.info("draw_calls = %s", repr(draw_calls))
self[constants.DRAW_CALLS] = draw_calls
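# Rough worked example of the indexing pass above (illustrative only): given
# position data for two triangles that share an edge,
#   positions = [A, B, C,  B, C, D]        # each letter one 3-float vertex
# every vertex tuple is hashed in `indexed`, so B and C are written to the
# output attribute arrays only once and index_data becomes [0, 1, 2, 1, 2, 3].
# A draw call is only emitted when the number of unique vertices would
# overflow the chosen index type (0xffff for 16-bit indices, 0x7fffffff for
# the uint32 option); the buffer is then split and DC_INDEX records the base
# vertex offset of the next batch.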
def _parse_geometry(self):
"""Parse the geometry to Three.Geometry specs"""
if self.options.get(constants.VERTICES):
logger.info("Parsing %s", constants.VERTICES)
self[constants.VERTICES] = api.mesh.vertices(self.node) or []
if self.options.get(constants.NORMALS):
logger.info("Parsing %s", constants.NORMALS)
self[constants.NORMALS] = api.mesh.normals(self.node) or []
if self.options.get(constants.COLORS):
logger.info("Parsing %s", constants.COLORS)
self[constants.COLORS] = api.mesh.vertex_colors(
self.node) or []
if self.options.get(constants.FACE_MATERIALS):
logger.info("Parsing %s", constants.FACE_MATERIALS)
self[constants.MATERIALS] = api.mesh.materials(
self.node, self.options) or []
if self.options.get(constants.UVS):
logger.info("Parsing %s", constants.UVS)
self[constants.UVS] = api.mesh.uvs(self.node) or []
if self.options.get(constants.FACES):
logger.info("Parsing %s", constants.FACES)
material_list = self.get(constants.MATERIALS)
self[constants.FACES] = api.mesh.faces(
self.node, self.options, material_list=material_list) or []
no_anim = (None, False, constants.OFF)
if self.options.get(constants.ANIMATION) not in no_anim:
logger.info("Parsing %s", constants.ANIMATION)
self[constants.ANIMATION] = api.mesh.skeletal_animation(
self.node, self.options) or []
# @TODO: considering making bones data implied when
# querying skinning data
bone_map = {}
if self.options.get(constants.BONES):
logger.info("Parsing %s", constants.BONES)
bones, bone_map = api.mesh.bones(self.node, self.options)
self[constants.BONES] = bones
if self.options.get(constants.SKINNING):
logger.info("Parsing %s", constants.SKINNING)
influences = self.options.get(
constants.INFLUENCES_PER_VERTEX, 2)
self[constants.INFLUENCES_PER_VERTEX] = influences
self[constants.SKIN_INDICES] = api.mesh.skin_indices(
self.node, bone_map, influences) or []
self[constants.SKIN_WEIGHTS] = api.mesh.skin_weights(
self.node, bone_map, influences) or []
if self.options.get(constants.MORPH_TARGETS):
logger.info("Parsing %s", constants.MORPH_TARGETS)
self[constants.MORPH_TARGETS] = api.mesh.morph_targets(
self.node, self.options) or []
# At the moment there is no way to add extra data to a Geometry in
# Three.js. In case there is some day, here is the code:
#
# option_extra_vgroups = self.options.get(constants.EXTRA_VGROUPS)
#
# for name, index in api.mesh.extra_vertex_groups(self.node,
# option_extra_vgroups):
#
# logger.info("Exporting extra vertex group %s", name)
# self[name] = api.mesh.vertex_group_data(self.node, index)
|
|
# -*- coding: utf-8 -*-
# Copyright 2019 The SGNMT Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains ``SimpleTrie`` which is a generic trie
implementation based on strings of integers.
"""
from operator import itemgetter
class SimpleNode:
"""Helper class representing a node in a ``SimpleTrie`` """
def __init__(self):
"""Creates an empty node without children. """
self.edges = {} # outgoing edges with terminal symbols
self.element = None # Elements stored at this node
class SimpleTrie:
"""This is a very simple Trie implementation. It is simpler than
the one in ``cam.sgnmt.predictors.grammar`` because it does not
support non-terminals or removal. The only supported operations are
``add`` and ``get``, but those are implemented very efficiently.
For many applications (e.g. the cache in the greedy heuristic) this
is already enough.
The implementation also supports keys in sparse representation,
in which most of the elements in the sequence are zero (see
``add_sparse``, ``get_sparse``, and ``nearest_sparse``). In this
case, the key is a list of tuples [(dim1,val1),...(dimN,valN)].
Internally, we store them as sequence "dim1 val1 dim2 val2..."
Note that we assume that the tuples are ordered by dimension!
"""
def __init__(self):
"""Creates an empty Trie data structure. """
self.root = SimpleNode()
def _get_node(self, seq):
"""Get the ```SimpleNode``` for the given sequence ``seq``. If
the path for ``seq`` does not exist yet in the Trie, add it and
return a reference to the newly created node. """
cur_node = self.root
for token_id in seq:
children = cur_node.edges
if not token_id in children:
children[token_id] = SimpleNode()
cur_node = children[token_id]
return cur_node
def add(self, seq, element):
"""Add an element to the Trie for the key ``seq``. If ``seq``
already exists, override.
Args:
seq (list): Key
element (object): The object to store for key ``seq``
"""
self._get_node(seq).element = element
def get(self, seq):
"""Retrieve the element for a key ``seq``.
Args:
seq (list): Query key
Returns:
object. The element which has been added along with ``seq``
or ``None`` if the key does not exist.
"""
return self._get_node(seq).element
def get_prefix(self, seq):
"""Get the key in the Trie with the longest common prefix with
``seq``.
Args:
seq (list): Query sequence
Returns:
list. The longest key in the Trie which is a prefix of
``seq``.
"""
cur_node = self.root
prefix = []
best_prefix = []
for token_id in seq:
children = cur_node.edges
if not token_id in children:
break
prefix.append(token_id)
cur_node = children[token_id]
if cur_node.element:
best_prefix = list(prefix)
return best_prefix
def _sparse2seq(self, key):
"""Transforms a key in sparse representation to a sequence
which can be used as key in the Trie.
"""
seq = []
for (d,v) in key:
seq.append(d)
seq.append(v)
return seq
def add_sparse(self, key, element):
"""Adds an element with a key in sparse representation.
Args:
key (list): Sparse key (list of tuples)
element (object): The object to store for key ``seq``
"""
self.add(self._sparse2seq(key), element)
def get_sparse(self, key):
"""Retrieves an element with a key in sparse representation.
Args:
key (list): Query key in sparse format
Returns:
object. The element which has been added along with ``key``
or ``None`` if the key does not exist.
"""
return self.get(self._sparse2seq(key))
def nearest_sparse(self, query):
"""This method returns the element in the Trie with the closest
key to ``query`` in terms of Euclidean distance. The efficiency
relies on sparseness: The more zeros in the vector, the more
efficient. If the Trie contains an exact match, this method
runs linear in the length of the query (i.e. independent of
number of elements in the Trie).
Args:
query (list): Query key in sparse format
Returns:
Tuple. (object,dist) pair with the nearest element to
``query`` in terms of L2 norm and the squared L2 distance.
"""
self.best_dist = float("inf")
self.best_element = None
self._register_best_element = self._register_best_element_single
self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)
return self.best_element,self.best_dist
def n_nearest_sparse(self, query, n=1):
"""This method returns the n element in the Trie with the closest
key to ``query`` in terms of Euclidean distance. The efficiency
relies on sparseness: The more zeros in the vector, the more
efficient.
Args:
query (list): Query key in sparse format
n (int): Number of elements to retrieve
Returns:
List. List of (object,dist) pairs with the nearest element to
``query`` in terms of L2 norm and the squared L2 distance.
"""
if n <= 1:
return [self.nearest_sparse(query)]
self.best_dist = float("inf")
self.best_elements = [(None, self.best_dist)] # guardian element
self.n = n
self._register_best_element = self._register_best_element_multi
self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)
return self.best_elements
def _register_best_element_single(self, dist, el):
self.best_dist = dist
self.best_element = el
def _register_best_element_multi(self, dist, el):
self.best_elements = self.best_elements[:self.n-1] + [(el, dist)]
self.best_elements.sort(key=itemgetter(1))
self.best_dist = self.best_elements[-1][1]
def _nearest_sparse_recursive(self, seq, root, dist):
if dist > self.best_dist:
return
if not seq:
self._dfs_for_nearest(root, dist)
return
if root.element:
add_dist = sum([seq[idx]**2 for idx in range(1, len(seq), 2)])
if dist + add_dist < self.best_dist:
self._register_best_element(dist + add_dist, root.element)
dim = seq[0]
# Explore close matches first
children = sorted(root.edges, key=lambda el: (el-dim)**2)
for child_dim in children:
child_node = root.edges[child_dim]
next_seq = seq[0:]
next_dist = dist
try:
while child_dim > next_seq[0]:
next_dist += next_seq[1]**2
next_seq = next_seq[2:]
if child_dim == next_seq[0]: # Exact match :)
c_discount = next_seq[1]
next_seq = next_seq[2:]
else:
c_discount = 0.0
for c,node in child_node.edges.items():
self._nearest_sparse_recursive(next_seq,
node,
next_dist+(c-c_discount)**2)
except IndexError:
for c,node in child_node.edges.items():
self._dfs_for_nearest(node, next_dist + c*c)
def _dfs_for_nearest(self, root, dist):
"""Scans the subtree under ``root`` for nearest elements.
``dist`` is the distance which has already been
accumulated.
"""
if dist > self.best_dist:
return
if root.element:
self._register_best_element(dist, root.element)
return
for child in root.edges.values():
for c,next_child in child.edges.items():
self._dfs_for_nearest(next_child, dist + c*c)
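# Minimal usage sketch (not part of the original SGNMT module); the keys and
# payloads below are made up and only illustrate the dense and sparse APIs.
if __name__ == "__main__":
    dense = SimpleTrie()
    dense.add([1, 2, 3], "abc")
    assert dense.get([1, 2, 3]) == "abc"
    assert dense.get_prefix([1, 2, 3, 4]) == [1, 2, 3]
    sparse = SimpleTrie()
    # Sparse keys are ordered (dimension, value) tuples, stored internally
    # as the flat sequence "dim1 val1 dim2 val2 ...".
    sparse.add_sparse([(0, 2.0), (5, 1.0)], "near-origin")
    element, sq_dist = sparse.nearest_sparse([(0, 2.0), (5, 2.0)])
    # The stored key differs from the query only in dimension 5 (by 1.0),
    # so the squared L2 distance is 1.0.
    assert element == "near-origin" and sq_dist == 1.0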
|
|
import lib.spell
from dateutil import relativedelta as rdelta
class Templates(object):
current = "Curently, it's %s and %s. "
forecast_parts = (
"By the %(period)s, the weather will turn %(description)s. ",
"The %(period)s will be %(description)s. ",
"It looks like the %(period)s of %(date)s will be %(description)s. ",
) # Backwards so we can pop()
class Forecast(object):
# Definitions are based on http://www.wrh.noaa.gov/mtr/glossary.php
terms = {
'cloud': [
(12, 'CLEAR'),
(24, 'MOSTLY CLEAR'),
(60, 'PARTLY CLEAR'),
(84, 'MOSTLY CLOUDY'),
(float('inf'), 'CLOUDY'),
],
'wind': [
(00.44, ''),
(02.24, 'LIGHTLY WINDY'),
(06.71, 'SLIGHTLY WINDY'),
(11.18, 'BREEZY'),
(15.65, 'WINDY'),
(17.88, 'VERY WINDY'),
(float('inf'), 'EXTREMELY WINDY'),
],
'chance': [
(20, 'slight chance of'),
(40, 'chance of'),
(80, 'likely chance of'),
(float('inf'), ''),
],
'intensity': [
(0.76, 'VERY LIGHT'),
(7.62, 'LIGHT'),
(22.9, 'MODERATE'),
(float('inf'), 'HEAVY'),
],
}
def __new__(self, data):
# Data must be of the following format:
# [morning1, morning2, afternoon1, afternoon2, evening1, evening2]
# If there is no data for a given time slot,
# the value for that position should be None
forecast = [self.humanTerms(item) for item in data]
return (
('morning', self.stringify(*forecast[0:2])),
('afternoon', self.stringify(*forecast[2:4])),
('evening', self.stringify(*forecast[4:]))
)
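# Illustrative call (not from the original module): for a day where only the
# two afternoon slots have data, the constructor receives something like
#   Forecast([None, None, item_11h, item_14h, None, None])
# and the empty morning/evening slots collapse to '' via stringify().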
@classmethod
def humanTerms(cls, item):
if item is None:
return (None, None)
terms = cls.terms
description = []
temperature = [
int(item['main']['temp_min']),
int(item['main']['temp_max']),
int(item['main']['temp'])
]
clouds = item['clouds']['all']
for num, string in terms['cloud']:
if clouds <= num:
if string:
description.append(string)
break
wind = item['wind']['speed']
for num, string in terms['wind']:
if wind <= num:
if string:
description.append(string)
break
if 'snow' in item:
snow = item['snow']['3h']
for num, string in terms['intensity']:
if snow <= num:
if string:
description.append(string + ' snow')
break
if 'rain' in item:
rain = item['rain']['3h']
for num, string in terms['intensity']:
if rain <= num:
if string:
description.append(string + ' rain')
break
# Unfortunately, openWeatherMap doesn't
# have PoP (probability of precipitation) data
return temperature, description
@staticmethod
def stringify(weather1, weather2):
# Possible conditions:
# (a): The two conditions are the same
# (b): The two conditions are different
# (c): Only one condition exists
temperature1, description1 = weather1
temperature2, description2 = weather2
if not (temperature1 and temperature2):
if temperature1:
temperature2 = temperature1
description2 = description1
elif temperature2:
temperature1 = temperature2
description1 = description2
else:
return ''
low = temperature1[0]
high = temperature2[1]
def _stringify(elements):
if len(elements) == 1:
return ', '.join(elements)
else:
return '%s and %s' % (', '.join(elements[:-1]), elements[-1])
if description1 == description2:
return (
"%s (high %s/low %s)"
% (_stringify(description1), high, low)
)
else:
# Remove items from description2 that are in description1
description2 = [
item
for item in description2
if item not in description1
]
return '%s, becoming %s later on (high %s/low %s)' % (
_stringify(description1), _stringify(description2), high, low
)
class OpenWeatherMap(lib.spell.BaseSpell):
""" Gets the current weather conditions and forecast """
weight = 100
pattern = r"""
# What is the current weather?
# What is the current forecast?
# What will today's weather be like?
# What will saturday's weather be like?
# What is the forecast for next Tuesday?
# What is next Friday's forecast for Dallas, Texas?
# What is the weather like in Dallas, Texas?
(?:
What
\s+(?:is|will)
(?:\s+(?:the|be))?
(
\s+.+
\s+(?:weather|forecast)
[^?]*
)
\?*
)
"""
offsetKeys = set([
'current', 'today', 'tomorrow', 'monday', 'tuesday',
'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'weekend'
])
offsets = {
'current': [rdelta.relativedelta(days=0)],
'today': [rdelta.relativedelta(days=0)],
'tomorrow': [rdelta.relativedelta(days=1)],
'monday': [rdelta.relativedelta(weekday=rdelta.MO)],
'tuesday': [rdelta.relativedelta(weekday=rdelta.TU)],
'wednesday': [rdelta.relativedelta(weekday=rdelta.WE)],
'thursday': [rdelta.relativedelta(weekday=rdelta.TH)],
'friday': [rdelta.relativedelta(weekday=rdelta.FR)],
'saturday': [rdelta.relativedelta(weekday=rdelta.SA)],
'sunday': [rdelta.relativedelta(weekday=rdelta.SU)],
'weekend': [
rdelta.relativedelta(weekday=rdelta.SA),
rdelta.relativedelta(weekday=rdelta.SU)
],
}
hours = [
' 05:00:00', # morning 1
' 08:00:00', # morning 2
' 11:00:00', # afternoon 1
' 14:00:00', # afternoon 2
' 17:00:00', # evening 1
' 20:00:00', # evening 2
]
config = {
'Weather.Location': str,
'Weather.Units': [str, 'metric', 'imperial']
}
def incantation(self, query, config, state):
result = ['']
if state is None:
state = {}
today = self.today()
query_words = [word for word in query.split(' ') if word]
if 'next' in query_words:
# There's no way we'll have enough data
return None, state
if 'for' in query_words:
index = query_words.index('for')
query_loc = ' '.join(query_words[index+1:])
del query_words[index:]
elif 'in' in query_words:
index = query_words.index('in')
query_loc = ' '.join(query_words[index+1:])
del query_words[index:]
else:
query_loc = config['Weather.Location']
query_words = set((word.rstrip("'s").lower() for word in query_words))
query_words.intersection_update(self.offsetKeys)
try:
weekday = tuple(query_words)[0]
offsets = self.offsets[weekday]
except (KeyError, IndexError):
# Just give the current weather
weekday = 'current'
offsets = self.offsets[weekday]
try:
location_id = state[query_loc]
except KeyError:
data = self.fetch(
'http://api.openweathermap.org/data/2.1/find/name',
get={'q': query_loc},
format='json'
)
try:
location_id = state[query_loc] = data['list'][0]['id']
except KeyError:
return None, state
if weekday in ('current', 'today'):
url = (
'http://api.openweathermap.org/data/2.1/weather/city/%s'
% location_id
)
data = self.fetch(
url,
get={'units': config['Weather.Units']},
format='json'
)
temperature, description = Forecast.humanTerms(data)
elements = [str(temperature[2])] + description
result.append(
Templates.current
% (', '.join(elements[:-1]), elements[-1])
)
weekday = 'today'
# Finally, let's get the data!
url = (
'http://api.openweathermap.org/data/2.2/forecast/city/%s'
% location_id
)
data = self.fetch(
url,
get={'units': config['Weather.Units']},
format='json'
)
for offset in offsets:
if len(result) > 1:
result.append("\n\n")
date_obj = today + offset
date_string = date_obj.strftime('%Y-%m-%d')
dates = [date_string + hour for hour in self.hours]
forecast_dates = dict(((date, None) for date in dates))
forecast_parts = list(Templates.forecast_parts)
for item in data['list']:
if item['dt_txt'] in forecast_dates:
forecast_dates[item['dt_txt']] = item
values = {
'date': date_obj.strftime('%a, %b %e')
}
forecast = (
Forecast((
forecast_dates[x] for x in sorted(forecast_dates)
))
)
for values['period'], values['description'] in forecast:
if values['description']:
result.append(forecast_parts.pop() % values)
return "".join(result), state
|
|
import laph3 as L
NONE = ()
def Spaces(n):
if n > 20:
raise 'TOO DEEP'
return '\n\n' + (n * ' * ')
class AltVisitor:
def __init__(compiled):
.compiled = compiled
.top = .compiled.tree
.mem = {}
def visitTuple(p, here, togo, shadow, real, stops, depth):
print '%s visitTuple <<< %s [[ %s ]] %s || %s' % (Spaces(depth), here, togo, shadow, real)
say here, togo, shadow, real, stops
# dic
h, t = L.HT(togo)
if not h:
# Return tuple keys.
z = set(p.dic.keys())
print '%s visitTuple >>> %s' % (Spaces(depth), repr(z))
return z
hereH = L.J(here, h)
x = p.dic.get(h, NONE)
say p.dic
say here, togo, real, h, t, x
if x is NONE:
return ('bad', 'visitTuple cannot find %q in %q' % (h, here))
shadowH = L.J(shadow, h) if shadow else shadow
realH = L.J(real, h) if real else real
say hereH, t, shadowH, realH, stops
z = x.visit(self, here=hereH, togo=t, shadow=shadowH, real=realH, stops=stops, depth=depth+1)
print '%s visitTuple >>> %s' % (Spaces(depth), repr(z))
return z
def visitDerive(p, here, togo, shadow, real, stops, depth):
print '%s visitDerive %s <<< %s [[ %s ]] %s || %s' % (Spaces(depth), p.template, here, togo, shadow, real)
# template, diff
#d, n = .LookupDir(p.template, L.D(here))
absD, n = .LookupDir(p.template, L.D(real), depth=depth+1)
absT = L.J(absD, p.template)
say absT, n, p.template, real, L.D(real)
h, t = L.HT(togo)
say absT
tem = .EvalPath(absT, depth=depth+1)
say absT, tem
must type(tem) is set, tem, absT, here, togo
if not h:
# Return derived tuple keys.
z = set(tem | set(p.diff.keys())) # Union template & diff keys.
say z
print '%s visitDerive >>> %s' % (Spaces(depth), repr(z))
return z
say h
hereH = L.J(here, h)
realH = L.J(real, h) if real else real
x = p.diff.get(h, NONE)
say h, hereH, realH, x, x is NONE
if x is NONE:
z = .EvalPath(L.J(absD, h, t), real=real, depth=depth+1) # Add real path.
say here, togo, z
print '%s visitDerive >>> %s' % (Spaces(depth), repr(z))
return z
# Shadow begins here, when we go down the diff path.
z = x.visit(self, here=hereH, togo=t, shadow=L.J(absD, h), real=realH, stops=stops, depth=depth+1)
say here, togo, z
print '%s visitDerive >>> %s' % (Spaces(depth), repr(z))
return z
def visitEnhance(p, here, togo, shadow, real, stops, depth):
print '%s visitEnhance %s <<< %s [[ %s ]] %s || %s' % (Spaces(depth), p.dslot, here, togo, shadow, real)
# dslot, diff
h, t = L.HT(togo)
say here, togo, shadow, real, stops, h, t
if not h:
# Just the enhanced directory.
enhancedSet = set(p.diff.keys())
shadowSet = .EvalPath(shadow, real, stops=stops, depth=depth+1)
say here, togo, shadow, real, stops
say enhancedSet
say shadowSet
must type(shadowSet) is set
z = enhancedSet | shadowSet # Union.
print '%s visitEnhance >>> %s' % (Spaces(depth), repr(z))
return z
hereH = L.J(here, h)
realH = L.J(real, h) if real else real
x = p.diff.get(h, NONE)
if x is NONE:
z = .EvalPath(L.J(shadow, h, t), real=real, depth=depth+1)
say here, togo, shadow, h, t, z
print '%s visitEnhance >>> %s' % (Spaces(depth), repr(z))
return z
z = x.visit(self, here=hereH, togo=t, shadow=L.J(shadow, h), real=realH, stops=stops, depth=depth+1)
say here, togo, shadow, h, t, z
print '%s visitEnhance >>> %s' % (Spaces(depth), repr(z))
return z
def visitBare(p, here, togo, shadow, real, stops, depth):
must not togo, here, togo
.mem[here] = p.a
return p.a
def visitCommand(p, here, togo, shadow, real, stops, depth):
raise 'TODO'
def EvalPath(path, shadow=None, stops=None, real=None, depth=0):
# Start with the path as real.
real2 = real if real else '/'
say path, shadow, stops, real
z = .visitTuple(.top, here='/', togo=path, shadow=shadow, real=real2, stops=stops, depth=depth+1)
say path, shadow, stops, real, z
return z
def LookupDir(path, cwd, depth=0):
if path.startswith('/'):
return '/', None
vec = L.S(path)
if not vec: raise 'No valid path: %q' % path
hd = vec[0]
say hd, path, cwd
d = cwd
prev = None
n = 0
while prev not in ['/', '', '.']:
try:
say path, cwd, d
x = .EvalPath(L.J(d, hd), depth=depth+1)
say path, cwd, d, x
if x is not None:
return d, n
except as ex:
say path, cwd, d, ex
pass
prev = d
d = L.D(d)
n += 1
raise 'Path %q not found in or above directory %q' % (path, cwd)
################################
t1 = L.Compile(`
a = bilbo
b = {
c = {
d = frodo
}
e = {
d = samwise
}
}
`)
av1 = AltVisitor(t1)
must av1.EvalPath('/') == set(['a', 'b'])
must av1.EvalPath('/a') == 'bilbo'
must av1.EvalPath('/b') == set(['c', 'e'])
must av1.EvalPath('/b/c') == set(['d'])
must av1.EvalPath('/b/c/d') == 'frodo'
################################
t2 = L.Compile(`
Q = { a = 111 ; b = 222 }
R = Q { a = 777 ; c = 888}
`)
av2 = AltVisitor(t2)
must av2.EvalPath('/') == set(['Q', 'R'])
must av2.EvalPath('/Q') == set(['a', 'b'])
must av2.EvalPath('/R') == set(['a', 'b', 'c'])
must av2.EvalPath('/Q/a') == '111'
must av2.EvalPath('/Q/b') == '222'
must av2.EvalPath('/R/a') == '777'
must av2.EvalPath('/R/b') == '222'
must av2.EvalPath('/R/c') == '888'
################################
t3 = L.Compile(`
X = {
M = { a = 111 ; b = 222 }
N = { c = 333 ; d = 444 }
}
Y = X {
M { a = 555 ; f = 666 }
P = { z = 888 }
}
`)
av3 = AltVisitor(t3)
must av3.EvalPath('/') == set(['X', 'Y'])
must av3.EvalPath('/X') == set(['M', 'N'])
must av3.EvalPath('/Y') == set(['M', 'N', 'P'])
must av3.EvalPath('/X/M/a') == '111'
must av3.EvalPath('/X/M/b') == '222'
must av3.EvalPath('/X/N/c') == '333'
must av3.EvalPath('/X/N/d') == '444'
must av3.EvalPath('/Y/M') == set(['a', 'b', 'f'])
must av3.EvalPath('/Y/M/a') == '555'
must av3.EvalPath('/Y/M/b') == '222'
must av3.EvalPath('/Y/M/f') == '666'
must av3.EvalPath('/Y/N/d') == '444'
must av3.EvalPath('/Y/P/z') == '888'
################################
t4 = L.Compile(`
OLD = {
info = { age = old }
P = info { size = small }
Q = P { size = medium }
}
NEW = OLD {
info { age = new }
}
`)
av4 = AltVisitor(t4)
must av4.EvalPath('/NEW/P/size') == 'small'
must av4.EvalPath('/NEW/P/age') == 'new'
################################
print 'OKAY laph3_alt.py'
|
|
# testing/exclusions.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from ..util import decorator
from . import config
from .. import util
import inspect
import contextlib
from sqlalchemy.util.compat import inspect_getargspec
def skip_if(predicate, reason=None):
rule = compound()
pred = _as_predicate(predicate, reason)
rule.skips.add(pred)
return rule
def fails_if(predicate, reason=None):
rule = compound()
pred = _as_predicate(predicate, reason)
rule.fails.add(pred)
return rule
class compound(object):
def __init__(self):
self.fails = set()
self.skips = set()
self.tags = set()
def __add__(self, other):
return self.add(other)
def add(self, *others):
copy = compound()
copy.fails.update(self.fails)
copy.skips.update(self.skips)
copy.tags.update(self.tags)
for other in others:
copy.fails.update(other.fails)
copy.skips.update(other.skips)
copy.tags.update(other.tags)
return copy
def not_(self):
copy = compound()
copy.fails.update(NotPredicate(fail) for fail in self.fails)
copy.skips.update(NotPredicate(skip) for skip in self.skips)
copy.tags.update(self.tags)
return copy
@property
def enabled(self):
return self.enabled_for_config(config._current)
def enabled_for_config(self, config):
for predicate in self.skips.union(self.fails):
if predicate(config):
return False
else:
return True
def matching_config_reasons(self, config):
return [
predicate._as_string(config) for predicate
in self.skips.union(self.fails)
if predicate(config)
]
def include_test(self, include_tags, exclude_tags):
return bool(
not self.tags.intersection(exclude_tags) and
(not include_tags or self.tags.intersection(include_tags))
)
def _extend(self, other):
self.skips.update(other.skips)
self.fails.update(other.fails)
self.tags.update(other.tags)
def __call__(self, fn):
if hasattr(fn, '_sa_exclusion_extend'):
fn._sa_exclusion_extend._extend(self)
return fn
@decorator
def decorate(fn, *args, **kw):
return self._do(config._current, fn, *args, **kw)
decorated = decorate(fn)
decorated._sa_exclusion_extend = self
return decorated
@contextlib.contextmanager
def fail_if(self):
all_fails = compound()
all_fails.fails.update(self.skips.union(self.fails))
try:
yield
except Exception as ex:
all_fails._expect_failure(config._current, ex)
else:
all_fails._expect_success(config._current)
def _do(self, config, fn, *args, **kw):
for skip in self.skips:
if skip(config):
msg = "'%s' : %s" % (
fn.__name__,
skip._as_string(config)
)
config.skip_test(msg)
try:
return_value = fn(*args, **kw)
except Exception as ex:
self._expect_failure(config, ex, name=fn.__name__)
else:
self._expect_success(config, name=fn.__name__)
return return_value
def _expect_failure(self, config, ex, name='block'):
for fail in self.fails:
if fail(config):
print(("%s failed as expected (%s): %s " % (
name, fail._as_string(config), str(ex))))
break
else:
util.raise_from_cause(ex)
def _expect_success(self, config, name='block'):
if not self.fails:
return
for fail in self.fails:
if not fail(config):
break
else:
raise AssertionError(
"Unexpected success for '%s' (%s)" %
(
name,
" and ".join(
fail._as_string(config)
for fail in self.fails
)
)
)
def requires_tag(tagname):
return tags([tagname])
def tags(tagnames):
comp = compound()
comp.tags.update(tagnames)
return comp
def only_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return skip_if(NotPredicate(predicate), reason)
def succeeds_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return fails_if(NotPredicate(predicate), reason)
class Predicate(object):
@classmethod
def as_predicate(cls, predicate, description=None):
if isinstance(predicate, compound):
return cls.as_predicate(predicate.enabled_for_config, description)
elif isinstance(predicate, Predicate):
if description and predicate.description is None:
predicate.description = description
return predicate
elif isinstance(predicate, (list, set)):
return OrPredicate(
[cls.as_predicate(pred) for pred in predicate],
description)
elif isinstance(predicate, tuple):
return SpecPredicate(*predicate)
elif isinstance(predicate, util.string_types):
tokens = predicate.split(" ", 2)
op = spec = None
db = tokens.pop(0)
if tokens:
op = tokens.pop(0)
if tokens:
spec = tuple(int(d) for d in tokens.pop(0).split("."))
return SpecPredicate(db, op, spec, description=description)
elif util.callable(predicate):
return LambdaPredicate(predicate, description)
else:
assert False, "unknown predicate type: %s" % predicate
def _format_description(self, config, negate=False):
bool_ = self(config)
if negate:
bool_ = not bool_
return self.description % {
"driver": config.db.url.get_driver_name(),
"database": config.db.url.get_backend_name(),
"doesnt_support": "doesn't support" if bool_ else "does support",
"does_support": "does support" if bool_ else "doesn't support"
}
def _as_string(self, config=None, negate=False):
raise NotImplementedError()
class BooleanPredicate(Predicate):
def __init__(self, value, description=None):
self.value = value
self.description = description or "boolean %s" % value
def __call__(self, config):
return self.value
def _as_string(self, config, negate=False):
return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
def __init__(self, db, op=None, spec=None, description=None):
self.db = db
self.op = op
self.spec = spec
self.description = description
_ops = {
'<': operator.lt,
'>': operator.gt,
'==': operator.eq,
'!=': operator.ne,
'<=': operator.le,
'>=': operator.ge,
'in': operator.contains,
'between': lambda val, pair: val >= pair[0] and val <= pair[1],
}
def __call__(self, config):
engine = config.db
if "+" in self.db:
dialect, driver = self.db.split('+')
else:
dialect, driver = self.db, None
if dialect and engine.name != dialect:
return False
if driver is not None and engine.driver != driver:
return False
if self.op is not None:
assert driver is None, "DBAPI version specs not supported yet"
version = _server_version(engine)
oper = hasattr(self.op, '__call__') and self.op \
or self._ops[self.op]
return oper(version, self.spec)
else:
return True
def _as_string(self, config, negate=False):
if self.description is not None:
return self._format_description(config)
elif self.op is None:
if negate:
return "not %s" % self.db
else:
return "%s" % self.db
else:
if negate:
return "not %s %s %s" % (
self.db,
self.op,
self.spec
)
else:
return "%s %s %s" % (
self.db,
self.op,
self.spec
)
class LambdaPredicate(Predicate):
def __init__(self, lambda_, description=None, args=None, kw=None):
spec = inspect_getargspec(lambda_)
if not spec[0]:
self.lambda_ = lambda db: lambda_()
else:
self.lambda_ = lambda_
self.args = args or ()
self.kw = kw or {}
if description:
self.description = description
elif lambda_.__doc__:
self.description = lambda_.__doc__
else:
self.description = "custom function"
def __call__(self, config):
return self.lambda_(config)
def _as_string(self, config, negate=False):
return self._format_description(config)
class NotPredicate(Predicate):
def __init__(self, predicate, description=None):
self.predicate = predicate
self.description = description
def __call__(self, config):
return not self.predicate(config)
def _as_string(self, config, negate=False):
if self.description:
return self._format_description(config, not negate)
else:
return self.predicate._as_string(config, not negate)
class OrPredicate(Predicate):
def __init__(self, predicates, description=None):
self.predicates = predicates
self.description = description
def __call__(self, config):
for pred in self.predicates:
if pred(config):
return True
return False
def _eval_str(self, config, negate=False):
if negate:
conjunction = " and "
else:
conjunction = " or "
return conjunction.join(p._as_string(config, negate=negate)
for p in self.predicates)
def _negation_str(self, config):
if self.description is not None:
return "Not " + self._format_description(config)
else:
return self._eval_str(config, negate=True)
def _as_string(self, config, negate=False):
if negate:
return self._negation_str(config)
else:
if self.description is not None:
return self._format_description(config)
else:
return self._eval_str(config)
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
return SpecPredicate(db, op, spec)(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
return OrPredicate(
[Predicate.as_predicate(db) for db in dbs]
)
def open():
return skip_if(BooleanPredicate(False, "mark as execute"))
def closed():
return skip_if(BooleanPredicate(True, "marked as skip"))
def fails():
return fails_if(BooleanPredicate(True, "expected to fail"))
@decorator
def future(fn, *arg):
return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
return fails_if(SpecPredicate(db), reason)
def fails_on_everything_except(*dbs):
return succeeds_if(
OrPredicate([
SpecPredicate(db) for db in dbs
])
)
def skip(db, reason=None):
return skip_if(SpecPredicate(db), reason)
def only_on(dbs, reason=None):
return only_if(
OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)])
)
def exclude(db, op, spec, reason=None):
return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
assert queries, "no queries sent!"
return OrPredicate([
Predicate.as_predicate(query)
for query in queries
])(config)
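# Hedged usage sketch (not part of the SQLAlchemy source): the helpers above
# are normally applied as decorators on test methods, with string predicates
# of the form "<backend>[+driver] [<op> <version>]", e.g.
#
#   @skip_if("sqlite", "sqlite has no SEQUENCE support")
#   @fails_if("mysql < 5.6", "requires microsecond precision")
#   @only_on(["postgresql", "oracle"])
#   def test_something(self):
#       ...
#
# Each decorator builds a `compound` whose skip/fail predicates are evaluated
# against config._current when the test runs.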
|
|
# Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib.api import validators
from neutron_lib import constants
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from sqlalchemy.orm import scoped_session
from neutron._i18n import _
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import _deprecate
from neutron.common import constants as n_const
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db.models import securitygroup as sg_models
from neutron.extensions import securitygroup as ext_sg
_deprecate._moved_global('DefaultSecurityGroup', new_module=sg_models)
_deprecate._moved_global('SecurityGroupPortBinding', new_module=sg_models)
_deprecate._moved_global('SecurityGroupRule', new_module=sg_models)
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
"""Mixin class to add security group to db_base_plugin_v2."""
__native_bulk_support = True
def create_security_group_bulk(self, context, security_group_rule):
return self._create_bulk('security_group', context,
security_group_rule)
def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs):
# NOTE(armax): a callback exception here will prevent the request
# from being processed. This is a hook point for backend's validation;
# we raise to propagate the reason for the failure.
try:
registry.notify(res, event, self, **kwargs)
except exceptions.CallbackFailure as e:
if exc_cls:
reason = (_('cannot perform %(event)s due to %(reason)s') %
{'event': event, 'reason': e})
raise exc_cls(reason=reason, id=id)
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
If default_sg is true that means we are creating the default security
group for a given tenant if it does not already exist.
"""
s = security_group['security_group']
kwargs = {
'context': context,
'security_group': s,
'is_default': default_sg,
}
self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
tenant_id = s['tenant_id']
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
with db_api.autonested_transaction(context.session):
security_group_db = sg_models.SecurityGroup(id=s.get('id') or (
uuidutils.generate_uuid()),
description=s['description'],
tenant_id=tenant_id,
name=s['name'])
context.session.add(security_group_db)
if default_sg:
context.session.add(sg_models.DefaultSecurityGroup(
security_group=security_group_db,
tenant_id=security_group_db['tenant_id']))
for ethertype in ext_sg.sg_supported_ethertypes:
if default_sg:
# Allow intercommunication
ingress_rule = sg_models.SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='ingress',
ethertype=ethertype,
source_group=security_group_db)
context.session.add(ingress_rule)
egress_rule = sg_models.SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='egress',
ethertype=ethertype)
context.session.add(egress_rule)
self._registry_notify(resources.SECURITY_GROUP,
events.PRECOMMIT_CREATE,
exc_cls=ext_sg.SecurityGroupConflict,
**kwargs)
secgroup_dict = self._make_security_group_dict(security_group_db)
kwargs['security_group'] = secgroup_dict
registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
**kwargs)
return secgroup_dict
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False, default_sg=False):
# If default_sg is True do not call _ensure_default_security_group()
# so this can be done recursively. Context.tenant_id is checked
# because all the unit tests do not explicitly set the context on
# GETS. TODO(arosen) context handling can probably be improved here.
if not default_sg and context.tenant_id:
tenant_id = filters.get('tenant_id')
if tenant_id:
tenant_id = tenant_id[0]
else:
tenant_id = context.tenant_id
self._ensure_default_security_group(context, tenant_id)
marker_obj = self._get_marker_obj(context, 'security_group', limit,
marker)
return self._get_collection(context,
sg_models.SecurityGroup,
self._make_security_group_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit, marker_obj=marker_obj,
page_reverse=page_reverse)
def get_security_groups_count(self, context, filters=None):
return self._get_collection_count(context, sg_models.SecurityGroup,
filters=filters)
def get_security_group(self, context, id, fields=None, tenant_id=None):
"""Tenant id is given to handle the case when creating a security
group rule on behalf of another user.
"""
if tenant_id:
tmp_context_tenant_id = context.tenant_id
context.tenant_id = tenant_id
try:
with context.session.begin(subtransactions=True):
ret = self._make_security_group_dict(self._get_security_group(
context, id), fields)
ret['security_group_rules'] = self.get_security_group_rules(
context, {'security_group_id': [id]})
finally:
if tenant_id:
context.tenant_id = tmp_context_tenant_id
return ret
def _get_security_group(self, context, id):
try:
query = self._model_query(context, sg_models.SecurityGroup)
sg = query.filter(sg_models.SecurityGroup.id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupNotFound(id=id)
return sg
def delete_security_group(self, context, id):
filters = {'security_group_id': [id]}
ports = self._get_port_security_group_bindings(context, filters)
if ports:
raise ext_sg.SecurityGroupInUse(id=id)
# confirm security group exists
sg = self._get_security_group(context, id)
if sg['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
kwargs = {
'context': context,
'security_group_id': id,
'security_group': sg,
}
self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_DELETE,
exc_cls=ext_sg.SecurityGroupInUse, id=id,
**kwargs)
with context.session.begin(subtransactions=True):
# pass security_group_rule_ids to ensure
# consistency with deleted rules
kwargs['security_group_rule_ids'] = [r['id'] for r in sg.rules]
self._registry_notify(resources.SECURITY_GROUP,
events.PRECOMMIT_DELETE,
exc_cls=ext_sg.SecurityGroupInUse, id=id,
**kwargs)
context.session.delete(sg)
kwargs.pop('security_group')
registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self,
**kwargs)
def update_security_group(self, context, id, security_group):
s = security_group['security_group']
kwargs = {
'context': context,
'security_group_id': id,
'security_group': s,
}
self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE,
exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
with context.session.begin(subtransactions=True):
sg = self._get_security_group(context, id)
if sg['name'] == 'default' and 'name' in s:
raise ext_sg.SecurityGroupCannotUpdateDefault()
self._registry_notify(
resources.SECURITY_GROUP,
events.PRECOMMIT_UPDATE,
exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
sg.update(s)
sg_dict = self._make_security_group_dict(sg)
kwargs['security_group'] = sg_dict
registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
**kwargs)
return sg_dict
def _make_security_group_dict(self, security_group, fields=None):
res = {'id': security_group['id'],
'name': security_group['name'],
'tenant_id': security_group['tenant_id'],
'description': security_group['description']}
res['security_group_rules'] = [self._make_security_group_rule_dict(r)
for r in security_group.rules]
self._apply_dict_extend_functions(ext_sg.SECURITYGROUPS, res,
security_group)
return self._fields(res, fields)
def _make_security_group_binding_dict(self, security_group, fields=None):
res = {'port_id': security_group['port_id'],
'security_group_id': security_group['security_group_id']}
return self._fields(res, fields)
def _create_port_security_group_binding(self, context, port_id,
security_group_id):
with context.session.begin(subtransactions=True):
db = sg_models.SecurityGroupPortBinding(port_id=port_id,
security_group_id=security_group_id)
context.session.add(db)
def _get_port_security_group_bindings(self, context,
filters=None, fields=None):
return self._get_collection(context,
sg_models.SecurityGroupPortBinding,
self._make_security_group_binding_dict,
filters=filters, fields=fields)
def _delete_port_security_group_bindings(self, context, port_id):
query = self._model_query(context, sg_models.SecurityGroupPortBinding)
bindings = query.filter(
sg_models.SecurityGroupPortBinding.port_id == port_id)
with context.session.begin(subtransactions=True):
for binding in bindings:
context.session.delete(binding)
def create_security_group_rule_bulk(self, context, security_group_rules):
return self._create_bulk('security_group_rule', context,
security_group_rules)
def create_security_group_rule_bulk_native(self, context,
security_group_rules):
rules = security_group_rules['security_group_rules']
scoped_session(context.session)
security_group_id = self._validate_security_group_rules(
context, security_group_rules)
with context.session.begin(subtransactions=True):
if not self.get_security_group(context, security_group_id):
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
self._check_for_duplicate_rules(context, rules)
ret = []
for rule_dict in rules:
res_rule_dict = self._create_security_group_rule(
context, rule_dict, validate=False)
ret.append(res_rule_dict)
return ret
def create_security_group_rule(self, context, security_group_rule):
return self._create_security_group_rule(context, security_group_rule)
def _create_security_group_rule(self, context, security_group_rule,
validate=True):
if validate:
self._validate_security_group_rule(context, security_group_rule)
rule_dict = security_group_rule['security_group_rule']
kwargs = {
'context': context,
'security_group_rule': rule_dict
}
self._registry_notify(resources.SECURITY_GROUP_RULE,
events.BEFORE_CREATE,
exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
with context.session.begin(subtransactions=True):
if validate:
self._check_for_duplicate_rules_in_db(context,
security_group_rule)
db = sg_models.SecurityGroupRule(
id=(rule_dict.get('id') or uuidutils.generate_uuid()),
tenant_id=rule_dict['tenant_id'],
security_group_id=rule_dict['security_group_id'],
direction=rule_dict['direction'],
remote_group_id=rule_dict.get('remote_group_id'),
ethertype=rule_dict['ethertype'],
protocol=rule_dict['protocol'],
port_range_min=rule_dict['port_range_min'],
port_range_max=rule_dict['port_range_max'],
remote_ip_prefix=rule_dict.get('remote_ip_prefix'),
description=rule_dict.get('description')
)
context.session.add(db)
self._registry_notify(resources.SECURITY_GROUP_RULE,
events.PRECOMMIT_CREATE,
exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
res_rule_dict = self._make_security_group_rule_dict(db)
kwargs['security_group_rule'] = res_rule_dict
registry.notify(
resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self,
**kwargs)
return res_rule_dict
def _get_ip_proto_number(self, protocol):
if protocol is None:
return
# According to bug 1381379, protocol is always set to string to avoid
# problems with comparing int and string in PostgreSQL. Here this
# string is converted to int to give an opportunity to use it as
# before.
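        # Illustrative examples, assuming the standard neutron-lib protocol
        # tables: 'tcp' maps to 6, the legacy alias 'icmpv6' is first
        # normalized to 'ipv6-icmp' and then maps to 58, while an already
        # numeric string such as '112' falls through the .get() default and
        # is simply cast to int.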
if protocol in n_const.IP_PROTOCOL_NAME_ALIASES:
protocol = n_const.IP_PROTOCOL_NAME_ALIASES[protocol]
return int(constants.IP_PROTOCOL_MAP.get(protocol, protocol))
def _get_ip_proto_name_and_num(self, protocol):
if protocol is None:
return
protocol = str(protocol)
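        # A sketch of the expected pairs, assuming the standard protocol maps:
        # 'tcp' -> ['tcp', '6'], '6' -> ['tcp', '6'], and an unknown value
        # such as 'foo' -> ['foo', 'foo'] (returned unchanged so the caller
        # can still filter on it verbatim).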
if protocol in constants.IP_PROTOCOL_MAP:
return [protocol, str(constants.IP_PROTOCOL_MAP.get(protocol))]
elif protocol in n_const.IP_PROTOCOL_NUM_TO_NAME_MAP:
return [n_const.IP_PROTOCOL_NUM_TO_NAME_MAP.get(protocol),
protocol]
return [protocol, protocol]
def _validate_port_range(self, rule):
"""Check that port_range is valid."""
if (rule['port_range_min'] is None and
rule['port_range_max'] is None):
return
if not rule['protocol']:
raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
ip_proto = self._get_ip_proto_number(rule['protocol'])
if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]:
if rule['port_range_min'] == 0 or rule['port_range_max'] == 0:
raise ext_sg.SecurityGroupInvalidPortValue(port=0)
elif (rule['port_range_min'] is not None and
rule['port_range_max'] is not None and
rule['port_range_min'] <= rule['port_range_max']):
pass
else:
raise ext_sg.SecurityGroupInvalidPortRange()
elif ip_proto in [constants.PROTO_NUM_ICMP,
constants.PROTO_NUM_IPV6_ICMP]:
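            # For ICMP/ICMPv6 rules port_range_min/port_range_max are reused
            # to carry the ICMP type and code respectively, each limited to
            # 0-255; a code without a type (max set, min unset) is rejected
            # below.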
for attr, field in [('port_range_min', 'type'),
('port_range_max', 'code')]:
if rule[attr] is not None and not (0 <= rule[attr] <= 255):
raise ext_sg.SecurityGroupInvalidIcmpValue(
field=field, attr=attr, value=rule[attr])
if (rule['port_range_min'] is None and
rule['port_range_max'] is not None):
raise ext_sg.SecurityGroupMissingIcmpType(
value=rule['port_range_max'])
def _validate_ethertype_and_protocol(self, rule):
"""Check if given ethertype and protocol are valid or not"""
if rule['protocol'] in [constants.PROTO_NAME_IPV6_ENCAP,
constants.PROTO_NAME_IPV6_FRAG,
constants.PROTO_NAME_IPV6_ICMP,
constants.PROTO_NAME_IPV6_ICMP_LEGACY,
constants.PROTO_NAME_IPV6_NONXT,
constants.PROTO_NAME_IPV6_OPTS,
constants.PROTO_NAME_IPV6_ROUTE]:
if rule['ethertype'] == constants.IPv4:
raise ext_sg.SecurityGroupEthertypeConflictWithProtocol(
ethertype=rule['ethertype'], protocol=rule['protocol'])
def _validate_single_tenant_and_group(self, security_group_rules):
"""Check that all rules belong to the same security group and tenant
"""
sg_groups = set()
tenants = set()
for rule_dict in security_group_rules['security_group_rules']:
rule = rule_dict['security_group_rule']
sg_groups.add(rule['security_group_id'])
if len(sg_groups) > 1:
raise ext_sg.SecurityGroupNotSingleGroupRules()
tenants.add(rule['tenant_id'])
if len(tenants) > 1:
raise ext_sg.SecurityGroupRulesNotSingleTenant()
return sg_groups.pop()
def _validate_security_group_rule(self, context, security_group_rule):
rule = security_group_rule['security_group_rule']
self._validate_port_range(rule)
self._validate_ip_prefix(rule)
self._validate_ethertype_and_protocol(rule)
if rule['remote_ip_prefix'] and rule['remote_group_id']:
raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()
remote_group_id = rule['remote_group_id']
# Check that remote_group_id exists for tenant
if remote_group_id:
self.get_security_group(context, remote_group_id,
tenant_id=rule['tenant_id'])
security_group_id = rule['security_group_id']
# Confirm that the tenant has permission
# to add rules to this security group.
self.get_security_group(context, security_group_id,
tenant_id=rule['tenant_id'])
return security_group_id
def _validate_security_group_rules(self, context, security_group_rules):
sg_id = self._validate_single_tenant_and_group(security_group_rules)
for rule in security_group_rules['security_group_rules']:
self._validate_security_group_rule(context, rule)
return sg_id
def _make_security_group_rule_dict(self, security_group_rule, fields=None):
res = {'id': security_group_rule['id'],
'tenant_id': security_group_rule['tenant_id'],
'security_group_id': security_group_rule['security_group_id'],
'ethertype': security_group_rule['ethertype'],
'direction': security_group_rule['direction'],
'protocol': security_group_rule['protocol'],
'port_range_min': security_group_rule['port_range_min'],
'port_range_max': security_group_rule['port_range_max'],
'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
'remote_group_id': security_group_rule['remote_group_id']}
self._apply_dict_extend_functions(ext_sg.SECURITYGROUPRULES, res,
security_group_rule)
return self._fields(res, fields)
def _make_security_group_rule_filter_dict(self, security_group_rule):
sgr = security_group_rule['security_group_rule']
res = {'tenant_id': [sgr['tenant_id']],
'security_group_id': [sgr['security_group_id']],
'direction': [sgr['direction']]}
include_if_present = ['protocol', 'port_range_max', 'port_range_min',
'ethertype', 'remote_ip_prefix',
'remote_group_id']
for key in include_if_present:
value = sgr.get(key)
if value:
res[key] = [value]
# protocol field will get corresponding name and number
value = sgr.get('protocol')
if value:
res['protocol'] = self._get_ip_proto_name_and_num(value)
return res
def _rules_equal(self, rule1, rule2):
"""Determines if two rules are equal ignoring id field."""
rule1_copy = rule1.copy()
rule2_copy = rule2.copy()
rule1_copy.pop('id', None)
rule2_copy.pop('id', None)
return rule1_copy == rule2_copy
def _check_for_duplicate_rules(self, context, security_group_rules):
for i in security_group_rules:
found_self = False
for j in security_group_rules:
if self._rules_equal(i['security_group_rule'],
j['security_group_rule']):
if found_self:
raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
found_self = True
self._check_for_duplicate_rules_in_db(context, i)
def _check_for_duplicate_rules_in_db(self, context, security_group_rule):
# Check in database if rule exists
filters = self._make_security_group_rule_filter_dict(
security_group_rule)
rule_dict = security_group_rule['security_group_rule'].copy()
rule_dict.pop('description', None)
keys = rule_dict.keys()
fields = list(keys) + ['id']
db_rules = self.get_security_group_rules(context, filters,
fields=fields)
# Note(arosen): the call to get_security_group_rules wildcards
# values in the filter that have a value of [None]. For
# example, filters = {'remote_group_id': [None]} will return
# all security group rules regardless of their value of
# remote_group_id. Therefore it is not possible to do this
# query unless the behavior of _get_collection()
# is changed which cannot be because other methods are already
# relying on this behavior. Therefore, we do the filtering
# below to check for these corner cases.
rule_dict.pop('id', None)
sg_protocol = rule_dict.pop('protocol', None)
for db_rule in db_rules:
rule_id = db_rule.pop('id', None)
# remove protocol and match separately for number and type
db_protocol = db_rule.pop('protocol', None)
is_protocol_matching = (
self._get_ip_proto_name_and_num(db_protocol) ==
self._get_ip_proto_name_and_num(sg_protocol))
if (is_protocol_matching and rule_dict == db_rule):
raise ext_sg.SecurityGroupRuleExists(rule_id=rule_id)
def _validate_ip_prefix(self, rule):
"""Check that a valid cidr was specified as remote_ip_prefix
No need to check that it is in fact an IP address as this is already
validated by attribute validators.
Check that rule ethertype is consistent with remote_ip_prefix ip type.
Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
"""
input_prefix = rule['remote_ip_prefix']
if input_prefix:
addr = netaddr.IPNetwork(input_prefix)
# set input_prefix to always include the netmask:
rule['remote_ip_prefix'] = str(addr)
# check consistency of ethertype with addr version
if rule['ethertype'] != "IPv%d" % (addr.version):
raise ext_sg.SecurityGroupRuleParameterConflict(
ethertype=rule['ethertype'], cidr=input_prefix)
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'security_group_rule',
limit, marker)
return self._get_collection(context,
sg_models.SecurityGroupRule,
self._make_security_group_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit, marker_obj=marker_obj,
page_reverse=page_reverse)
def get_security_group_rules_count(self, context, filters=None):
return self._get_collection_count(context, sg_models.SecurityGroupRule,
filters=filters)
def get_security_group_rule(self, context, id, fields=None):
security_group_rule = self._get_security_group_rule(context, id)
return self._make_security_group_rule_dict(security_group_rule, fields)
def _get_security_group_rule(self, context, id):
try:
query = self._model_query(context, sg_models.SecurityGroupRule)
sgr = query.filter(sg_models.SecurityGroupRule.id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupRuleNotFound(id=id)
return sgr
def delete_security_group_rule(self, context, id):
kwargs = {
'context': context,
'security_group_rule_id': id
}
self._registry_notify(resources.SECURITY_GROUP_RULE,
events.BEFORE_DELETE, id=id,
exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs)
with context.session.begin(subtransactions=True):
query = self._model_query(context,
sg_models.SecurityGroupRule).filter(
sg_models.SecurityGroupRule.id == id)
self._registry_notify(resources.SECURITY_GROUP_RULE,
events.PRECOMMIT_DELETE,
exc_cls=ext_sg.SecurityGroupRuleInUse, id=id,
**kwargs)
try:
# As there is a filter on a primary key it is not possible for
# MultipleResultsFound to be raised
context.session.delete(query.one())
except exc.NoResultFound:
raise ext_sg.SecurityGroupRuleNotFound(id=id)
registry.notify(
resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self,
**kwargs)
def _extend_port_dict_security_group(self, port_res, port_db):
# Security group bindings will be retrieved from the SQLAlchemy
# model. As they're loaded eagerly with ports because of the
# joined load they will not cause an extra query.
security_group_ids = [sec_group_mapping['security_group_id'] for
sec_group_mapping in port_db.security_groups]
port_res[ext_sg.SECURITYGROUPS] = security_group_ids
return port_res
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_extend_port_dict_security_group'])
def _process_port_create_security_group(self, context, port,
security_group_ids):
if validators.is_attr_set(security_group_ids):
for security_group_id in security_group_ids:
self._create_port_security_group_binding(context, port['id'],
security_group_id)
# Convert to list as a set might be passed here and
# this has to be serialized
port[ext_sg.SECURITYGROUPS] = (security_group_ids and
list(security_group_ids) or [])
def _ensure_default_security_group(self, context, tenant_id):
"""Create a default security group if one doesn't exist.
:returns: the default security group id for given tenant.
"""
try:
query = self._model_query(context, sg_models.DefaultSecurityGroup)
default_group = query.filter_by(tenant_id=tenant_id).one()
return default_group['security_group_id']
except exc.NoResultFound:
security_group = {
'security_group':
{'name': 'default',
'tenant_id': tenant_id,
'description': _('Default security group')}
}
return self.create_security_group(
context, security_group, default_sg=True)['id']
def _get_security_groups_on_port(self, context, port):
"""Check that all security groups on port belong to tenant.
:returns: all security groups IDs on port belonging to tenant.
"""
port = port['port']
if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
return
if port.get('device_owner') and utils.is_port_trusted(port):
return
port_sg = port.get(ext_sg.SECURITYGROUPS, [])
filters = {'id': port_sg}
tenant_id = port.get('tenant_id')
if tenant_id:
filters['tenant_id'] = [tenant_id]
valid_groups = set(g['id'] for g in
self.get_security_groups(context, fields=['id'],
filters=filters))
requested_groups = set(port_sg)
port_sg_missing = requested_groups - valid_groups
if port_sg_missing:
raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))
return requested_groups
def _ensure_default_security_group_on_port(self, context, port):
# we don't apply security groups for dhcp, router
port = port['port']
if port.get('device_owner') and utils.is_port_trusted(port):
return
default_sg = self._ensure_default_security_group(context,
port['tenant_id'])
if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
port[ext_sg.SECURITYGROUPS] = [default_sg]
def _check_update_deletes_security_groups(self, port):
"""Return True if port has as a security group and it's value
is either [] or not is_attr_set, otherwise return False
"""
if (ext_sg.SECURITYGROUPS in port['port'] and
not (validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS])
and port['port'][ext_sg.SECURITYGROUPS] != [])):
return True
return False
def _check_update_has_security_groups(self, port):
"""Return True if port has security_groups attribute set and
its not empty, or False otherwise.
This method is called both for port create and port update.
"""
if (ext_sg.SECURITYGROUPS in port['port'] and
(validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
port['port'][ext_sg.SECURITYGROUPS] != [])):
return True
return False
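    # Note: the two predicates above are complements only when the request
    # actually contains a 'security_groups' key; if the key is absent both
    # return False and the port's existing bindings are left untouched.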
def update_security_group_on_port(self, context, id, port,
original_port, updated_port):
"""Update security groups on port.
        This method returns a flag indicating whether a request notification
        is required; it does not perform the notification itself, because
        other changes to the port may also require notification.
"""
need_notify = False
port_updates = port['port']
if (ext_sg.SECURITYGROUPS in port_updates and
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
port_updates[ext_sg.SECURITYGROUPS])):
            # delete the existing port bindings and re-add them with the
            # new security groups
port_updates[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._delete_port_security_group_bindings(context, id)
self._process_port_create_security_group(
context,
updated_port,
port_updates[ext_sg.SECURITYGROUPS])
need_notify = True
else:
updated_port[ext_sg.SECURITYGROUPS] = (
original_port[ext_sg.SECURITYGROUPS])
return need_notify
_deprecate._MovedGlobals()
|
|
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from fileserver.HashCalc import HashSumType, hashsum, hashsum_of_file
from fileserver.ResponseWorker import ResponseByType, ResponseType
from fileserver.AdminWorker import InitializeVariables, GenerateAdmin, \
CheckAdminFromPostRequest, ChangeAdminPasswordFromPostRequest, \
CreateUserFromPostRequest, ResetUserPasswordFromPostRequest, \
DeleteUserFromPostRequest, GetUsersList, GetFileList, \
AdminGetFileResponseFromPostRequest, AdminDeleteFileFromPostRequest, \
CleanMediaDirectory
from fileserver.OptionWorker import IsInitialized, GetOptionList, \
ChangeOptionFromPostRequest
from fileserver.ResponseFormatWorker import GenerateOutput
from fileserver.OutputTableHeader import OutputTableHeader
@csrf_exempt
def initialize_server(request):
try:
if IsInitialized():
return ResponseByType(ResponseType.PermissionDenied, request)
initializeVariablesCode = InitializeVariables()
if (initializeVariablesCode != ResponseType.OK):
return ResponseByType(initializeVariablesCode, request)
(generateAdminCode, (login, password)) = GenerateAdmin()
if (generateAdminCode != ResponseType.OK):
return ResponseByType(generateAdminCode, request)
header = OutputTableHeader.InitializeServer.value
data = ((login, password),)
outputTable = GenerateOutput(header, data, request)
return HttpResponse(outputTable)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def change_admin_password(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
changeAdminPasswordCode = ChangeAdminPasswordFromPostRequest(request)
if (changeAdminPasswordCode != ResponseType.OK):
return ResponseByType(changeAdminPasswordCode, request)
return ResponseByType(ResponseType.OK, request)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def register_user(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
(createUserCode, password) = CreateUserFromPostRequest(request)
if (createUserCode != ResponseType.OK):
return ResponseByType(createUserCode, request)
header = OutputTableHeader.RegisterUser.value
data = ((password,),)
outputTable = GenerateOutput(header, data, request)
return HttpResponse(outputTable)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def reset_user_password(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
(resetUserPasswordCode, password) = \
ResetUserPasswordFromPostRequest(request)
if (resetUserPasswordCode != ResponseType.OK):
return ResponseByType(resetUserPasswordCode, request)
header = OutputTableHeader.ResetUserPassword.value
data = ((password,),)
outputTable = GenerateOutput(header, data, request)
return HttpResponse(outputTable)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def delete_user(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
deleteUserCode = DeleteUserFromPostRequest(request)
if (deleteUserCode != ResponseType.OK):
return ResponseByType(deleteUserCode, request)
return ResponseByType(ResponseType.OK, request)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def list_all_users(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
usersList = GetUsersList(request)
return HttpResponse(usersList)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def get_options_list(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
optionList = GetOptionList(request)
return HttpResponse(optionList)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def change_option(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
changeOptionCode = ChangeOptionFromPostRequest(request)
return ResponseByType(changeOptionCode, request)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def list_all_files(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
(getFileListCode, outputTable) = GetFileList(request)
if (getFileListCode != ResponseType.OK):
return ResponseByType(getFileListCode, request)
return HttpResponse(outputTable)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def admin_download_file(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
(downloadCode, fileResponse) = \
AdminGetFileResponseFromPostRequest(request)
if (downloadCode != ResponseType.OK):
return ResponseByType(downloadCode, request)
return fileResponse
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def delete_file(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
deleteFileCode = AdminDeleteFileFromPostRequest(request)
if (deleteFileCode != ResponseType.OK):
return ResponseByType(deleteFileCode, request)
return ResponseByType(ResponseType.OK, request)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def clean_media_directory(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
outputTable = CleanMediaDirectory(request)
return HttpResponse(outputTable)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
@csrf_exempt
def admin_get_version(request):
try:
checkAdminCode = CheckAdminFromPostRequest(request)
if (checkAdminCode != ResponseType.OK):
return ResponseByType(checkAdminCode, request)
header = OutputTableHeader.GetVersion.value
data = (("1.0",),)
outputTable = GenerateOutput(header, data, request)
return HttpResponse(outputTable)
except Exception:
return ResponseByType(ResponseType.UnknownError, request)
|
|
import unittest
from test import support
import binascii
import copy
import pickle
import random
import sys
from test.support import bigmemtest, _1G, _4G
zlib = support.import_module('zlib')
requires_Compress_copy = unittest.skipUnless(
hasattr(zlib.compressobj(), "copy"),
'requires Compress.copy()')
requires_Decompress_copy = unittest.skipUnless(
hasattr(zlib.decompressobj(), "copy"),
'requires Decompress.copy()')
class VersionTestCase(unittest.TestCase):
def test_library_version(self):
# Test that the major version of the actual library in use matches the
# major version that we were compiled against. We can't guarantee that
# the minor versions will match (even on the machine on which the module
# was compiled), and the API is stable between minor versions, so
# testing only the major versions avoids spurious failures.
self.assertEqual(zlib.ZLIB_RUNTIME_VERSION[0], zlib.ZLIB_VERSION[0])
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(b""), zlib.crc32(b"", 0))
self.assertTrue(zlib.crc32(b"abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32(b"", 0), 0)
self.assertEqual(zlib.crc32(b"", 1), 1)
self.assertEqual(zlib.crc32(b"", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(b""), zlib.adler32(b"", 1))
self.assertTrue(zlib.adler32(b"abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32(b"", 0), 0)
self.assertEqual(zlib.adler32(b"", 1), 1)
self.assertEqual(zlib.adler32(b"", 432), 432)
def test_penguins(self):
self.assertEqual(zlib.crc32(b"penguin", 0), 0x0e5c1a120)
self.assertEqual(zlib.crc32(b"penguin", 1), 0x43b6aa94)
self.assertEqual(zlib.adler32(b"penguin", 0), 0x0bcf02f6)
self.assertEqual(zlib.adler32(b"penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32(b"penguin"), zlib.crc32(b"penguin", 0))
self.assertEqual(zlib.adler32(b"penguin"),zlib.adler32(b"penguin",1))
def test_crc32_adler32_unsigned(self):
foo = b'abcdefghijklmnop'
        # explicitly test that the returned values are unsigned
self.assertEqual(zlib.crc32(foo), 2486878355)
self.assertEqual(zlib.crc32(b'spam'), 1138425661)
self.assertEqual(zlib.adler32(foo+foo), 3573550353)
self.assertEqual(zlib.adler32(b'spam'), 72286642)
def test_same_as_binascii_crc32(self):
foo = b'abcdefghijklmnop'
crc = 2486878355
self.assertEqual(binascii.crc32(foo), crc)
self.assertEqual(zlib.crc32(foo), crc)
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
# Issue #10276 - check that inputs >=4 GiB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
@bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
def test_big_buffer(self, size):
data = b"nyan" * (_1G + 1)
self.assertEqual(zlib.crc32(data), 1044521549)
self.assertEqual(zlib.adler32(data), 2256789997)
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_badlevel(self):
# specifying compression level out of range causes an error
# (but -1 is Z_DEFAULT_COMPRESSION and apparently the zlib
# accepts 0 too)
self.assertRaises(zlib.error, zlib.compress, b'ERROR', 10)
def test_badargs(self):
self.assertRaises(TypeError, zlib.adler32)
self.assertRaises(TypeError, zlib.crc32)
self.assertRaises(TypeError, zlib.compress)
self.assertRaises(TypeError, zlib.decompress)
for arg in (42, None, '', 'abc', (), []):
self.assertRaises(TypeError, zlib.adler32, arg)
self.assertRaises(TypeError, zlib.crc32, arg)
self.assertRaises(TypeError, zlib.compress, arg)
self.assertRaises(TypeError, zlib.decompress, arg)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
# specifying total bits too large causes an error
self.assertRaises(ValueError,
zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, -1)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
@support.cpython_only
def test_overflow(self):
with self.assertRaisesRegex(OverflowError, 'int too large'):
zlib.decompress(b'', 15, sys.maxsize + 1)
with self.assertRaisesRegex(OverflowError, 'int too large'):
zlib.decompressobj().decompress(b'', sys.maxsize + 1)
with self.assertRaisesRegex(OverflowError, 'int too large'):
zlib.decompressobj().flush(sys.maxsize + 1)
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
        # Generate 10 MiB worth of random data, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
for i in range(10)])
data = data * (size // len(data) + 1)
try:
compress_func(data)
finally:
# Release memory
data = None
def check_big_decompress_buffer(self, size, decompress_func):
data = b'x' * size
try:
compressed = zlib.compress(data, 1)
finally:
# Release memory
data = None
data = decompress_func(compressed)
# Sanity check
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b'x')), 0)
finally:
data = None
class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_keywords(self):
x = zlib.compress(HAMLET_SCENE, level=3)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
with self.assertRaises(TypeError):
zlib.compress(data=HAMLET_SCENE, level=3)
self.assertEqual(zlib.decompress(x,
wbits=zlib.MAX_WBITS,
bufsize=zlib.DEF_BUF_SIZE),
HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.compress(bytearray(data)), x)
for ob in x, bytearray(x):
self.assertEqual(zlib.decompress(ob), data)
def test_incomplete_stream(self):
# A useful error message is given
x = zlib.compress(HAMLET_SCENE)
self.assertRaisesRegex(zlib.error,
"Error -5 while decompressing data: incomplete or truncated stream",
zlib.decompress, x[:-1])
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
compress = lambda s: zlib.compress(s, 1)
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
self.check_big_decompress_buffer(size, zlib.decompress)
@bigmemtest(size=_4G, memuse=1)
def test_large_bufsize(self, size):
# Test decompress(bufsize) parameter greater than the internal limit
data = HAMLET_SCENE * 10
compressed = zlib.compress(data, 1)
self.assertEqual(zlib.decompress(compressed, 15, size), data)
def test_custom_bufsize(self):
data = HAMLET_SCENE * 10
compressed = zlib.compress(data, 1)
self.assertEqual(zlib.decompress(compressed, 15, CustomInt()), data)
@unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
@bigmemtest(size=_4G + 100, memuse=4)
def test_64bit_compress(self, size):
data = b'x' * size
try:
comp = zlib.compress(data, 0)
self.assertEqual(zlib.decompress(comp), data)
finally:
comp = data = None
class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
datasrc = HAMLET_SCENE * 128
datazip = zlib.compress(datasrc)
# should compress both bytes and bytearray data
for data in (datasrc, bytearray(datasrc)):
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
self.assertEqual(x1 + x2, datazip)
for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
dco = zlib.decompressobj()
y1 = dco.decompress(v1 + v2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
self.assertIsInstance(dco.unconsumed_tail, bytes)
self.assertIsInstance(dco.unused_data, bytes)
def test_keywords(self):
level = 2
method = zlib.DEFLATED
wbits = -12
memLevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level=level,
method=method,
wbits=wbits,
memLevel=memLevel,
strategy=strategy,
zdict=b"")
do = zlib.decompressobj(wbits=wbits, zdict=b"")
with self.assertRaises(TypeError):
co.compress(data=HAMLET_SCENE)
with self.assertRaises(TypeError):
do.decompress(data=zlib.compress(HAMLET_SCENE))
x = co.compress(HAMLET_SCENE) + co.flush()
y = do.decompress(x, max_length=len(HAMLET_SCENE)) + do.flush()
self.assertEqual(HAMLET_SCENE, y)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
memLevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memLevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
dco = zlib.decompressobj()
y1 = dco.decompress(b''.join(bufs))
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
decombuf = zlib.decompress(combuf)
# Test type of return value
self.assertIsInstance(decombuf, bytes)
self.assertEqual(data, decombuf)
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual(b'', dco.unconsumed_tail, ########
"(A) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress(b'')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual(b'', dco.unconsumed_tail, ########
"(B) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
self.assertEqual(data, b''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.assertFalse(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress(b'', max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, b"", -1)
self.assertEqual(b'', dco.unconsumed_tail)
def test_maxlen_large(self):
# Sizes up to sys.maxsize should be accepted, although zlib is
# internally limited to expressing sizes with unsigned int
data = HAMLET_SCENE * 10
self.assertGreater(len(data), zlib.DEF_BUF_SIZE)
compressed = zlib.compress(data, 1)
dco = zlib.decompressobj()
self.assertEqual(dco.decompress(compressed, sys.maxsize), data)
def test_maxlen_custom(self):
data = HAMLET_SCENE * 10
compressed = zlib.compress(data, 1)
dco = zlib.decompressobj()
self.assertEqual(dco.decompress(compressed, CustomInt()), data[:100])
def test_clear_unconsumed_tail(self):
# Issue #12050: calling decompress() without providing max_length
# should clear the unconsumed_tail attribute.
cdata = b"x\x9cKLJ\x06\x00\x02M\x01" # "abc"
dco = zlib.decompressobj()
ddata = dco.decompress(cdata, 1)
ddata += dco.decompress(dco.unconsumed_tail)
self.assertEqual(dco.unconsumed_tail, b"")
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH',
'Z_PARTIAL_FLUSH']
ver = tuple(int(v) for v in zlib.ZLIB_RUNTIME_VERSION.split('.'))
# Z_BLOCK has a known failure prior to 1.2.5.3
if ver >= (1, 2, 5, 3):
sync_opt.append('Z_BLOCK')
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
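        # Per the zlib manual: Z_SYNC_FLUSH and Z_FULL_FLUSH both force all
        # pending output out, aligned to a byte boundary; Z_FULL_FLUSH also
        # resets the compression state, while Z_NO_FLUSH lets deflate decide
        # how much input to buffer.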
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
try:
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
except:
print("Error for flush mode={}, level={}"
.format(sync, level))
raise
self.assertEqual(zlib.decompress(b''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
@unittest.skipUnless(hasattr(zlib, 'Z_SYNC_FLUSH'),
'requires zlib.Z_SYNC_FLUSH')
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.assertTrue(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), b"") # Returns nothing
def test_dictionary(self):
h = HAMLET_SCENE
# Build a simulated dictionary out of the words in HAMLET.
words = h.split()
random.shuffle(words)
zdict = b''.join(words)
# Use it to compress HAMLET.
co = zlib.compressobj(zdict=zdict)
cd = co.compress(h) + co.flush()
# Verify that it will decompress with the dictionary.
dco = zlib.decompressobj(zdict=zdict)
self.assertEqual(dco.decompress(cd) + dco.flush(), h)
# Verify that it fails when not given the dictionary.
dco = zlib.decompressobj()
self.assertRaises(zlib.error, dco.decompress, cd)
def test_dictionary_streaming(self):
# This simulates the reuse of a compressor object for compressing
# several separate data streams.
co = zlib.compressobj(zdict=HAMLET_SCENE)
do = zlib.decompressobj(zdict=HAMLET_SCENE)
piece = HAMLET_SCENE[1000:1500]
d0 = co.compress(piece) + co.flush(zlib.Z_SYNC_FLUSH)
d1 = co.compress(piece[100:]) + co.flush(zlib.Z_SYNC_FLUSH)
d2 = co.compress(piece[:-100]) + co.flush(zlib.Z_SYNC_FLUSH)
self.assertEqual(do.decompress(d0), piece)
self.assertEqual(do.decompress(d1), piece[100:])
self.assertEqual(do.decompress(d2), piece[:-100])
def test_decompress_incomplete_stream(self):
# This is 'foo', deflated
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
# For the record
self.assertEqual(zlib.decompress(x), b'foo')
self.assertRaises(zlib.error, zlib.decompress, x[:-5])
# Omitting the stream end works with decompressor objects
# (see issue #8672).
dco = zlib.decompressobj()
y = dco.decompress(x[:-5])
y += dco.flush()
self.assertEqual(y, b'foo')
def test_decompress_eof(self):
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo'
dco = zlib.decompressobj()
self.assertFalse(dco.eof)
dco.decompress(x[:-5])
self.assertFalse(dco.eof)
dco.decompress(x[-5:])
self.assertTrue(dco.eof)
dco.flush()
self.assertTrue(dco.eof)
def test_decompress_eof_incomplete_stream(self):
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo'
dco = zlib.decompressobj()
self.assertFalse(dco.eof)
dco.decompress(x[:-5])
self.assertFalse(dco.eof)
dco.flush()
self.assertFalse(dco.eof)
def test_decompress_unused_data(self):
# Repeated calls to decompress() after EOF should accumulate data in
# dco.unused_data, instead of just storing the arg to the last call.
source = b'abcdefghijklmnopqrstuvwxyz'
remainder = b'0123456789'
y = zlib.compress(source)
x = y + remainder
for maxlen in 0, 1000:
for step in 1, 2, len(y), len(x):
dco = zlib.decompressobj()
data = b''
for i in range(0, len(x), step):
if i < len(y):
self.assertEqual(dco.unused_data, b'')
if maxlen == 0:
data += dco.decompress(x[i : i + step])
self.assertEqual(dco.unconsumed_tail, b'')
else:
data += dco.decompress(
dco.unconsumed_tail + x[i : i + step], maxlen)
data += dco.flush()
self.assertTrue(dco.eof)
self.assertEqual(data, source)
self.assertEqual(dco.unconsumed_tail, b'')
self.assertEqual(dco.unused_data, remainder)
# issue27164
def test_decompress_raw_with_dictionary(self):
zdict = b'abcdefghijklmnopqrstuvwxyz'
co = zlib.compressobj(wbits=-zlib.MAX_WBITS, zdict=zdict)
comp = co.compress(zdict) + co.flush()
dco = zlib.decompressobj(wbits=-zlib.MAX_WBITS, zdict=zdict)
uncomp = dco.decompress(comp) + dco.flush()
self.assertEqual(zdict, uncomp)
def test_flush_with_freed_input(self):
# Issue #16411: decompressor accesses input to last decompress() call
# in flush(), even if this object has been freed in the meanwhile.
input1 = b'abcdefghijklmnopqrstuvwxyz'
input2 = b'QWERTYUIOPASDFGHJKLZXCVBNM'
data = zlib.compress(input1)
dco = zlib.decompressobj()
dco.decompress(data, 1)
del data
data = zlib.compress(input2)
self.assertEqual(dco.flush(), input1[1:])
@bigmemtest(size=_4G, memuse=1)
def test_flush_large_length(self, size):
# Test flush(length) parameter greater than internal limit UINT_MAX
input = HAMLET_SCENE * 10
data = zlib.compress(input, 1)
dco = zlib.decompressobj()
dco.decompress(data, 1)
self.assertEqual(dco.flush(size), input[1:])
def test_flush_custom_length(self):
input = HAMLET_SCENE * 10
data = zlib.compress(input, 1)
dco = zlib.decompressobj()
dco.decompress(data, 1)
self.assertEqual(dco.flush(CustomInt()), input[1:])
@requires_Compress_copy
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = bytes(str(HAMLET_SCENE, "ascii").swapcase(), "ascii")
for func in lambda c: c.copy(), copy.copy, copy.deepcopy:
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = func(c0)
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = b''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = b''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
@requires_Compress_copy
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
self.assertRaises(ValueError, copy.copy, c)
self.assertRaises(ValueError, copy.deepcopy, c)
@requires_Decompress_copy
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
# Test type of return value
self.assertIsInstance(comp, bytes)
for func in lambda c: c.copy(), copy.copy, copy.deepcopy:
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = func(d0)
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = b''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = b''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
@requires_Decompress_copy
def test_baddecompresscopy(self):
# Test copying a compression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
self.assertRaises(ValueError, copy.copy, d)
self.assertRaises(ValueError, copy.deepcopy, d)
def test_compresspickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(zlib.compressobj(zlib.Z_BEST_COMPRESSION), proto)
def test_decompresspickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(zlib.decompressobj(), proto)
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
c = zlib.compressobj(1)
compress = lambda s: c.compress(s) + c.flush()
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
d = zlib.decompressobj()
decompress = lambda s: d.decompress(s) + d.flush()
self.check_big_decompress_buffer(size, decompress)
@unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
@bigmemtest(size=_4G + 100, memuse=4)
def test_64bit_compress(self, size):
data = b'x' * size
co = zlib.compressobj(0)
do = zlib.decompressobj()
try:
comp = co.compress(data) + co.flush()
uncomp = do.decompress(comp) + do.flush()
self.assertEqual(uncomp, data)
finally:
comp = uncomp = data = None
@unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
@bigmemtest(size=_4G + 100, memuse=3)
def test_large_unused_data(self, size):
data = b'abcdefghijklmnop'
unused = b'x' * size
comp = zlib.compress(data) + unused
do = zlib.decompressobj()
try:
uncomp = do.decompress(comp) + do.flush()
self.assertEqual(unused, do.unused_data)
self.assertEqual(uncomp, data)
finally:
unused = comp = do = None
@unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
@bigmemtest(size=_4G + 100, memuse=5)
def test_large_unconsumed_tail(self, size):
data = b'x' * size
do = zlib.decompressobj()
try:
comp = zlib.compress(data, 0)
uncomp = do.decompress(comp, 1) + do.flush()
self.assertEqual(uncomp, data)
self.assertEqual(do.unconsumed_tail, b'')
finally:
comp = uncomp = data = None
def test_wbits(self):
# wbits=0 only supported since zlib v1.2.3.5
# Register "1.2.3" as "1.2.3.0"
# or "1.2.0-linux","1.2.0.f","1.2.0.f-linux"
v = zlib.ZLIB_RUNTIME_VERSION.split('-', 1)[0].split('.')
if len(v) < 4:
v.append('0')
elif not v[-1].isnumeric():
v[-1] = '0'
v = tuple(map(int, v))
supports_wbits_0 = v >= (1, 2, 3, 5)
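        # wbits convention exercised below: +9..+15 selects the zlib format
        # with a 2**wbits window, -9..-15 selects raw deflate, 16+N adds a
        # gzip wrapper, 32+N auto-detects zlib vs. gzip on decompression, and
        # 0 (only on new-enough zlib) reuses the window size recorded in the
        # zlib header.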
co = zlib.compressobj(level=1, wbits=15)
zlib15 = co.compress(HAMLET_SCENE) + co.flush()
self.assertEqual(zlib.decompress(zlib15, 15), HAMLET_SCENE)
if supports_wbits_0:
self.assertEqual(zlib.decompress(zlib15, 0), HAMLET_SCENE)
self.assertEqual(zlib.decompress(zlib15, 32 + 15), HAMLET_SCENE)
with self.assertRaisesRegex(zlib.error, 'invalid window size'):
zlib.decompress(zlib15, 14)
dco = zlib.decompressobj(wbits=32 + 15)
self.assertEqual(dco.decompress(zlib15), HAMLET_SCENE)
dco = zlib.decompressobj(wbits=14)
with self.assertRaisesRegex(zlib.error, 'invalid window size'):
dco.decompress(zlib15)
co = zlib.compressobj(level=1, wbits=9)
zlib9 = co.compress(HAMLET_SCENE) + co.flush()
self.assertEqual(zlib.decompress(zlib9, 9), HAMLET_SCENE)
self.assertEqual(zlib.decompress(zlib9, 15), HAMLET_SCENE)
if supports_wbits_0:
self.assertEqual(zlib.decompress(zlib9, 0), HAMLET_SCENE)
self.assertEqual(zlib.decompress(zlib9, 32 + 9), HAMLET_SCENE)
dco = zlib.decompressobj(wbits=32 + 9)
self.assertEqual(dco.decompress(zlib9), HAMLET_SCENE)
co = zlib.compressobj(level=1, wbits=-15)
deflate15 = co.compress(HAMLET_SCENE) + co.flush()
self.assertEqual(zlib.decompress(deflate15, -15), HAMLET_SCENE)
dco = zlib.decompressobj(wbits=-15)
self.assertEqual(dco.decompress(deflate15), HAMLET_SCENE)
co = zlib.compressobj(level=1, wbits=-9)
deflate9 = co.compress(HAMLET_SCENE) + co.flush()
self.assertEqual(zlib.decompress(deflate9, -9), HAMLET_SCENE)
self.assertEqual(zlib.decompress(deflate9, -15), HAMLET_SCENE)
dco = zlib.decompressobj(wbits=-9)
self.assertEqual(dco.decompress(deflate9), HAMLET_SCENE)
co = zlib.compressobj(level=1, wbits=16 + 15)
gzip = co.compress(HAMLET_SCENE) + co.flush()
self.assertEqual(zlib.decompress(gzip, 16 + 15), HAMLET_SCENE)
self.assertEqual(zlib.decompress(gzip, 32 + 15), HAMLET_SCENE)
dco = zlib.decompressobj(32 + 15)
self.assertEqual(dco.decompress(gzip), HAMLET_SCENE)
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = bytes()
for i in range(0, length, step):
blocks += bytes(randint(0, 255) for x in range(step))
return blocks
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = b"""
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
class CustomInt:
def __index__(self):
return 100
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import six
from django.utils.encoding import force_text
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
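    # Return the first entry in dictlist whose value under ``key`` equals
    # ``value``; raises IndexError if nothing matches.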
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
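        # Returning None here (the 'other' choice or no selection) leaves the
        # changelist queryset unfiltered, per the SimpleListFilter contract.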
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__in'  # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__isnull'  # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
title = 'department'
parameter_name = 'department'
def lookups(self, request, model_admin):
return sorted(set([
(employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code)
for employee in model_admin.queryset(request).all()
]))
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
self.bob = User.objects.create_user('bob', 'bob@example.com')
self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
# Departments
self.dev = Department.objects.create(code='DEV', description='Development')
self.design = Department.objects.create(code='DSN', description='Design')
# Employees
self.john = Employee.objects.create(name='John Blue', department=self.dev)
self.jack = Employee.objects.create(name='Jack Red', department=self.design)
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
        A SimpleListFilter's lookups() method may return None, which disables
        the filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
        Ensure that list_filter works with two-character field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
"""
        Ensure choices are correctly marked as selected when using non-string
        values for lookups in SimpleListFilters.
Refs #19318
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department': self.john.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.pk)
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
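# --- Illustrative sketch (not part of the test suite above) -------------------
# The admins above wire custom SimpleListFilter subclasses into list_filter.
# A minimal filter of the shape these tests exercise might look like the
# following; the class itself is a hypothetical example, but the parameter
# name and year ranges mirror the fixtures the tests rely on.
from django.contrib import admin

class DecadeListFilterSketch(admin.SimpleListFilter):
    # Human-readable title shown above the choices in the change-list sidebar.
    title = 'publication decade'
    # Query-string parameter, e.g. ?publication-decade=the+90s
    parameter_name = 'publication-decade'

    def lookups(self, request, model_admin):
        # (url value, human-readable label) pairs; returning None or an empty
        # iterable disables the filter entirely.
        return (
            ('the 90s', "the 1990's"),
            ('the 00s', "the 2000's"),
        )

    def queryset(self, request, queryset):
        # self.value() is the raw query-string value, or None when "All" is active.
        if self.value() == 'the 90s':
            return queryset.filter(year__gte=1990, year__lte=1999)
        if self.value() == 'the 00s':
            return queryset.filter(year__gte=2000, year__lte=2009)
        return queryset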
|
|
# We no longer support the old, non-colour editor!
from pywin.mfc import docview, object
from pywin.framework.editor import GetEditorOption
import win32ui
import os
import win32con
import string
import traceback
import win32api
import shutil
BAK_NONE=0
BAK_DOT_BAK=1
BAK_DOT_BAK_TEMP_DIR=2
BAK_DOT_BAK_BAK_DIR=3
MSG_CHECK_EXTERNAL_FILE = win32con.WM_USER+1999 ## WARNING: Duplicated in editor.py and coloreditor.py
import pywin.scintilla.document
ParentEditorDocument=pywin.scintilla.document.CScintillaDocument
class EditorDocumentBase(ParentEditorDocument):
def __init__(self, template):
self.bAutoReload = GetEditorOption("Auto Reload", 1)
self.bDeclinedReload = 0 # Has the user declined to reload.
self.fileStat = None
self.bReportedFileNotFound = 0
# what sort of bak file should I create.
# default to write to %temp%/bak/filename.ext
self.bakFileType=GetEditorOption("Backup Type", BAK_DOT_BAK_BAK_DIR)
self.watcherThread = FileWatchingThread(self)
self.watcherThread.CreateThread()
# Should I try and use VSS integration?
self.scModuleName=GetEditorOption("Source Control Module", "")
self.scModule = None # Loaded when first used.
ParentEditorDocument.__init__(self, template, template.CreateWin32uiDocument())
def OnCloseDocument(self ):
self.watcherThread.SignalStop()
return self._obj_.OnCloseDocument()
# def OnOpenDocument(self, name):
# rc = ParentEditorDocument.OnOpenDocument(self, name)
# self.GetFirstView()._SetLoadedText(self.text)
# self._DocumentStateChanged()
# return rc
def OnSaveDocument( self, fileName ):
win32ui.SetStatusText("Saving file...",1)
# rename to bak if required.
dir, basename = os.path.split(fileName)
if self.bakFileType==BAK_DOT_BAK:
bakFileName=dir+'\\'+os.path.splitext(basename)[0]+'.bak'
elif self.bakFileType==BAK_DOT_BAK_TEMP_DIR:
bakFileName=win32api.GetTempPath()+'\\'+os.path.splitext(basename)[0]+'.bak'
elif self.bakFileType==BAK_DOT_BAK_BAK_DIR:
tempPath=os.path.join(win32api.GetTempPath(),'bak')
try:
os.mkdir(tempPath,0)
except os.error:
pass
bakFileName=os.path.join(tempPath,basename)
try:
            os.unlink(bakFileName) # bakFileName is undefined when no backup is wanted (BAK_NONE), raising NameError.
except (os.error, NameError):
pass
try:
# Do a copy as it might be on different volumes,
# and the file may be a hard-link, causing the link
# to follow the backup.
shutil.copy2(fileName, bakFileName)
except (os.error, NameError, IOError):
pass
try:
self.SaveFile(fileName)
except IOError, details:
win32ui.MessageBox("Error - could not save file\r\n\r\n%s"%details)
return 0
except UnicodeEncodeError, details:
win32ui.MessageBox("Encoding failed: \r\n%s"%details +
'\r\nPlease add desired source encoding as first line of file, eg \r\n' +
'# -*- coding: mbcs -*-',
"File save failed")
return 0
self.SetModifiedFlag(0) # No longer dirty
self.bDeclinedReload = 0 # They probably want to know if it changes again!
win32ui.AddToRecentFileList(fileName)
self.SetPathName(fileName)
win32ui.SetStatusText("Ready")
self._DocumentStateChanged()
return 1
def FinalizeViewCreation(self, view):
ParentEditorDocument.FinalizeViewCreation(self, view)
if view == self.GetFirstView():
self._DocumentStateChanged()
if view.bFolding and GetEditorOption("Fold On Open", 0):
view.FoldTopLevelEvent()
def HookViewNotifications(self, view):
ParentEditorDocument.HookViewNotifications(self, view)
# Support for reloading the document from disk - presumably after some
# external application has modified it (or possibly source control has
    # checked it out).
def ReloadDocument(self):
"""Reloads the document from disk. Assumes the file has
been saved and user has been asked if necessary - it just does it!
"""
win32ui.SetStatusText("Reloading document. Please wait...", 1)
self.SetModifiedFlag(0)
# Loop over all views, saving their state, then reload the document
views = self.GetAllViews()
states = []
for view in views:
try:
info = view._PrepareUserStateChange()
except AttributeError: # Not our editor view?
info = None
states.append(info)
self.OnOpenDocument(self.GetPathName())
for view, info in zip(views, states):
if info is not None:
view._EndUserStateChange(info)
self._DocumentStateChanged()
win32ui.SetStatusText("Document reloaded.")
# Reloading the file
def CheckExternalDocumentUpdated(self):
if self.bDeclinedReload or not self.GetPathName():
return
try:
newstat = os.stat(self.GetPathName())
except os.error, exc:
if not self.bReportedFileNotFound:
print "The file '%s' is open for editing, but\nchecking it for changes caused the error: %s" % (self.GetPathName(), exc.strerror)
self.bReportedFileNotFound = 1
return
if self.bReportedFileNotFound:
print "The file '%s' has re-appeared - continuing to watch for changes..." % (self.GetPathName(),)
self.bReportedFileNotFound = 0 # Once found again we want to start complaining.
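        # stat tuple indices used below: [0] = st_mode, [6] = st_size,
        # [8] = st_mtime, [9] = st_ctime - i.e. any change in mode, size or
        # timestamps is treated as "the file changed on disk".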
changed = (self.fileStat is None) or \
self.fileStat[0] != newstat[0] or \
self.fileStat[6] != newstat[6] or \
self.fileStat[8] != newstat[8] or \
self.fileStat[9] != newstat[9]
if changed:
question = None
if self.IsModified():
question = "%s\r\n\r\nThis file has been modified outside of the source editor.\r\nDo you want to reload it and LOSE THE CHANGES in the source editor?" % self.GetPathName()
mbStyle = win32con.MB_YESNO | win32con.MB_DEFBUTTON2 # Default to "No"
else:
if not self.bAutoReload:
question = "%s\r\n\r\nThis file has been modified outside of the source editor.\r\nDo you want to reload it?" % self.GetPathName()
mbStyle = win32con.MB_YESNO # Default to "Yes"
if question:
rc = win32ui.MessageBox(question, None, mbStyle)
if rc!=win32con.IDYES:
self.bDeclinedReload = 1
return
self.ReloadDocument()
def _DocumentStateChanged(self):
"""Called whenever the documents state (on disk etc) has been changed
by the editor (eg, as the result of a save operation)
"""
if self.GetPathName():
try:
self.fileStat = os.stat(self.GetPathName())
except os.error:
self.fileStat = None
else:
self.fileStat = None
self.watcherThread._DocumentStateChanged()
self._UpdateUIForState()
self._ApplyOptionalToViews("_UpdateUIForState")
self._ApplyOptionalToViews("SetReadOnly", self._IsReadOnly())
self._ApplyOptionalToViews("SCISetSavePoint")
# Allow the debugger to reset us too.
import pywin.debugger
if pywin.debugger.currentDebugger is not None:
pywin.debugger.currentDebugger.UpdateDocumentLineStates(self)
# Read-only document support - make it obvious to the user
# that the file is read-only.
def _IsReadOnly(self):
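        # fileStat[0] is st_mode; 128 (== stat.S_IWRITE, the owner-write bit)
        # being clear means the file is read-only on disk.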
return self.fileStat is not None and (self.fileStat[0] & 128)==0
def _UpdateUIForState(self):
"""Change the title to reflect the state of the document -
eg ReadOnly, Dirty, etc
"""
filename = self.GetPathName()
if not filename: return # New file - nothing to do
try:
            # This seems necessary so the internal state of the window becomes
            # "visible".  Without it, the window is still shown, but certain
            # functions (such as updating the title) don't work immediately.
self.GetFirstView().ShowWindow(win32con.SW_SHOW)
title = win32ui.GetFileTitle(filename)
except win32ui.error:
title = filename
if self._IsReadOnly():
title = title + " (read-only)"
self.SetTitle(title)
def MakeDocumentWritable(self):
pretend_ss = 0 # Set to 1 to test this without source safe :-)
if not self.scModuleName and not pretend_ss: # No Source Control support.
win32ui.SetStatusText("Document is read-only, and no source-control system is configured")
win32api.MessageBeep()
return 0
# We have source control support - check if the user wants to use it.
msg = "Would you like to check this file out?"
defButton = win32con.MB_YESNO
if self.IsModified():
msg = msg + "\r\n\r\nALL CHANGES IN THE EDITOR WILL BE LOST"
defButton = win32con.MB_YESNO
if win32ui.MessageBox(msg, None, defButton)!=win32con.IDYES:
return 0
if pretend_ss:
print "We are only pretending to check it out!"
win32api.SetFileAttributes(self.GetPathName(), win32con.FILE_ATTRIBUTE_NORMAL)
self.ReloadDocument()
return 1
# Now call on the module to do it.
if self.scModule is None:
try:
self.scModule = __import__(self.scModuleName)
for part in self.scModuleName.split('.')[1:]:
self.scModule = getattr(self.scModule, part)
except:
traceback.print_exc()
print "Error loading source control module."
return 0
if self.scModule.CheckoutFile(self.GetPathName()):
self.ReloadDocument()
return 1
return 0
def CheckMakeDocumentWritable(self):
if self._IsReadOnly():
return self.MakeDocumentWritable()
return 1
def SaveModified(self):
# Called as the document is closed. If we are about
# to prompt for a save, bring the document to the foreground.
if self.IsModified():
frame = self.GetFirstView().GetParentFrame()
try:
frame.MDIActivate()
frame.AutoRestore()
except:
print "Could not bring document to foreground"
return self._obj_.SaveModified()
# NOTE - I DON'T use the standard threading module,
# as it waits for all threads to terminate at shutdown.
# When using the debugger, it is possible shutdown will
# occur without Pythonwin getting a complete shutdown,
# so we would deadlock at the end - threading would be left waiting for
# watcher threads that are never told to stop.
import pywin.mfc.thread
import win32event
class FileWatchingThread(pywin.mfc.thread.WinThread):
def __init__(self, doc):
self.doc = doc
self.adminEvent = win32event.CreateEvent(None, 0, 0, None)
self.stopEvent = win32event.CreateEvent(None, 0, 0, None)
self.watchEvent = None
pywin.mfc.thread.WinThread.__init__(self)
def _DocumentStateChanged(self):
win32event.SetEvent(self.adminEvent)
def RefreshEvent(self):
self.hwnd = self.doc.GetFirstView().GetSafeHwnd()
if self.watchEvent is not None:
win32api.FindCloseChangeNotification(self.watchEvent)
self.watchEvent = None
path = self.doc.GetPathName()
if path: path = os.path.dirname(path)
if path:
filter = win32con.FILE_NOTIFY_CHANGE_FILE_NAME | \
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES | \
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE
try:
self.watchEvent = win32api.FindFirstChangeNotification(path, 0, filter)
except win32api.error, exc:
print "Can not watch file", path, "for changes -", exc.strerror
def SignalStop(self):
win32event.SetEvent(self.stopEvent)
def Run(self):
while 1:
handles = [self.stopEvent, self.adminEvent]
if self.watchEvent is not None:
handles.append(self.watchEvent)
rc = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
if rc == win32event.WAIT_OBJECT_0:
break
elif rc == win32event.WAIT_OBJECT_0+1:
self.RefreshEvent()
else:
win32api.PostMessage(self.hwnd, MSG_CHECK_EXTERNAL_FILE, 0, 0)
try:
# If the directory has been removed underneath us, we get this error.
win32api.FindNextChangeNotification(self.watchEvent)
except win32api.error, exc:
print "Can not watch file", self.doc.GetPathName(), "for changes -", exc.strerror
break
# close a circular reference
self.doc = None
if self.watchEvent:
win32api.FindCloseChangeNotification(self.watchEvent)
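# --- Illustrative sketch (not part of the editor code above) ------------------
# FileWatchingThread combines a Win32 change-notification handle with two
# ordinary event handles in a single WaitForMultipleObjects() call.  A
# stripped-down, stand-alone version of that pattern (watch_dir and stop_event
# are assumptions: any existing directory and a win32event event handle):
import win32api, win32con, win32event

def watch_directory_sketch(watch_dir, stop_event):
    # Ask Windows to signal a handle whenever file names, attributes or
    # last-write times change in watch_dir (0 = do not watch subtrees).
    change_filter = (win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
                     win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
                     win32con.FILE_NOTIFY_CHANGE_LAST_WRITE)
    change_handle = win32api.FindFirstChangeNotification(watch_dir, 0, change_filter)
    try:
        while 1:
            rc = win32event.WaitForMultipleObjects(
                [stop_event, change_handle], 0, win32event.INFINITE)
            if rc == win32event.WAIT_OBJECT_0:
                break  # stop_event was signalled - shut down.
            # change_handle was signalled: react, then re-arm the notification.
            print("something changed in %s" % watch_dir)
            win32api.FindNextChangeNotification(change_handle)
    finally:
        win32api.FindCloseChangeNotification(change_handle)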
|
|
from kivy.app import App
from kivy.uix.stacklayout import StackLayout
from kivy.core.window import Window
from kivy.storage.jsonstore import JsonStore
from kivy.core.clipboard import Clipboard
from Crypto.PublicKey import RSA
import myrsa
import primenum
class Mainpage(StackLayout):
store = JsonStore('data.json')
pub = 'pub'
ownpub = 'ownpub'
ownpriv = 'ownpriv'
#AccordionItem 1
def savepub(self):
input_pubkey = self.ids['input_pubkey']
test = input_pubkey.text
self.store.put(self.pub, val=test)
pass
def clipPup(self):
input_pubkey = self.ids['input_pubkey']
input_pubkey.text = Clipboard.get('UTF8_STRING')
pass
#AccordionItem 2
def clipOwnpup(self):
input_public = self.ids['input_public']
Clipboard.put(input_public.text,'UTF8_STRING')
pass
def genkeys(self):
        (p, q) = primenum.getTwoRandomPrimes()
        (publickey, privatekey) = myrsa.generateRSAKeys(p, q)
        input_private = self.ids['input_private']
        input_private.text = str(privatekey)
        input_public = self.ids['input_public']
        input_public.text = str(publickey)
pass
def safeownkeys(self):
input_private = self.ids['input_private']
self.store.put(self.ownpriv, val=input_private.text)
input_public = self.ids['input_public']
self.store.put(self.ownpub, val=input_public.text)
pass
def loadownkeys(self):
if self.store.exists(self.ownpriv):
ownpriv = self.store.get(self.ownpriv)['val']
input_private = self.ids['input_private']
input_private.text = str(ownpriv)
pass
        if self.store.exists(self.ownpub):
ownpub = self.store.get(self.ownpub)['val']
input_public = self.ids['input_public']
input_public.text = str(ownpub)
pass
pass
#AccordionItem 3
def encryptpub(self):
output = self.ids['output_rsa']
input = self.ids['input_rsa']
if self.store.exists(self.pub):
pub = self.store.get(self.pub)['val']
values = pub.strip().replace(")", "").replace("(", "").split(',')
pubKey = []
for x in values:
pubKey.append(int(x))
pass
encode = self.encode(pubKey, input.text)
output.text = encode
pass
else:
            output.text = 'No public key stored'
pass
def decryptpub(self):
output = self.ids['output_rsa']
input = self.ids['input_rsa']
if self.store.exists(self.pub):
pub = self.store.get(self.pub)['val']
values = pub.strip().replace(")", "").replace("(", "").split(',')
pubKey = []
for x in values:
pubKey.append(int(x))
pass
decodeval = self.decode(pubKey, input.text)
output.text = decodeval
pass
else:
            output.text = 'No public key stored'
pass
def decryptownpriv(self):
output = self.ids['output_rsa']
input = self.ids['input_rsa']
if self.store.exists(self.ownpriv):
key = self.store.get(self.ownpriv)['val']
values = key.strip().replace(")", "").replace("(", "").split(',')
pubKey = []
for x in values:
pubKey.append(int(x))
pass
decodeval = self.decode(pubKey, input.text)
output.text = decodeval
else:
            output.text = 'No private key stored'
pass
def encryptownpriv(self):
output = self.ids['output_rsa']
input = self.ids['input_rsa']
if self.store.exists(self.ownpriv):
key = self.store.get(self.ownpriv)['val']
values = key.strip().replace(")", "").replace("(", "").split(',')
pubKey = []
for x in values:
pubKey.append(int(x))
pass
encode = self.encode(pubKey, input.text)
output.text = encode
pass
else:
            output.text = 'No private key stored'
pass
def inputfromclip(self):
input = self.ids['input_rsa']
input.text = Clipboard.get('UTF8_STRING')
pass
def cliptooutput(self):
input_public = self.ids['output_rsa']
Clipboard.put(input_public.text,'UTF8_STRING')
pass
pass
def encode(self, publickey, message):
        # RSA-encrypt each character of the message and collect the values in a list,
        # then return them joined as a comma-separated string.
n, e = publickey
values = []
for chrx in message:
oVal = ord(chrx)
encrypted_num = (oVal ** e) % n
values.append(encrypted_num)
pass
return ','.join(map(str, values))
def decode(self, privatekey, message):
values = message.split(',')
n, d = privatekey
message = ""
try:
for chrx in values:
number = int(chrx)
decrypted_num = number ** d % n
message += chr(decrypted_num)
pass
except ValueError:
message = "Nachricht nicht valide"
return message
class SafeMsgApp(App):
Window.clearcolor = (.2,.2,.2, 0)
def build(self):
main = Mainpage()
if main.store.exists(main.ownpriv):
ownpriv = main.store.get(main.ownpriv)['val']
input_private = main.ids['input_private']
input_private.text = str(ownpriv)
pass
        if main.store.exists(main.ownpub):
ownpub = main.store.get(main.ownpub)['val']
input_public = main.ids['input_public']
input_public.text = str(ownpub)
pass
        return main
if __name__ == '__main__':
SafeMsgApp().run()
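# --- Illustrative sketch (not part of the Kivy app above) ---------------------
# encode()/decode() above apply "textbook" RSA to each character separately:
# c = ord(ch) ** e mod n on encryption, ord value = c ** d mod n on decryption.
# A tiny self-contained version of that scheme, using a toy key pair rather
# than the keys produced by the primenum/myrsa helpers:
def toy_encode(public_key, message):
    n, e = public_key
    # pow(x, e, n) is the modular exponentiation the app writes as (x ** e) % n.
    return ','.join(str(pow(ord(ch), e, n)) for ch in message)

def toy_decode(private_key, ciphertext):
    n, d = private_key
    return ''.join(chr(pow(int(token), d, n)) for token in ciphertext.split(','))

# Round trip with the classic toy parameters p=61, q=53 -> n=3233, e=17, d=2753:
#   toy_decode((3233, 2753), toy_encode((3233, 17), 'hi')) == 'hi'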
|
|
import os, sys, shutil, subprocess, logging, string
import ksl.process, logging.handlers
import ksl.process.util as util
from ksl.process.install.installer import installer
from ksl.process.install.installer import getch
def main():
host_arch = os.uname()[4]
parser = build_parser(host_arch, 'kslinstall')
config_tuple = util.get_file_config(host_arch, 'kslinstall')
config_strings = ['--'+arg+'='+value for arg,value in config_tuple]
file_options = parser.parse_args(config_strings)
options = parser.parse_args(namespace=file_options)
setup_installer_log(options)
if options.version:
print("kslinstall: "+ksl.process.__version__)
sys.exit(0)
if options.interactive:
options.verbose = True
    if options.install_file == '':
        print("It appears you forgot to specify an install file!")
parser.print_usage()
return
installer_variants = parse_install_file(options)
for variant in installer_variants:
try:
variant_logs = []
if variant.build_host != os.uname()[4]:
log = logging.getLogger('ksl.installer')
log.info('skipping build %s because it requires build arch %s [host arch: %s]' %
(variant.target_arch+variant.tag, variant.build_host, os.uname()[4]))
continue
variant_logs = setup_package_logs(variant)
setup_target_dir(variant, options)
variant.virtual_install = False # assume real install by default
if options.do_install:
clobber_dir(variant, variant.target_dir, options)
variant.install(options)
if options.do_module:
setup_module_dir(variant, options)
variant.install_module()
close_logs(variant_logs)
except Exception as err:
log = logging.getLogger('ksl.installer')
log.error('error during install of variant %s: %s' % (variant.target_arch+variant.tag, err))
close_logs(variant_logs)
if options.errors_fatal:
raise
def build_parser(host_arch, script_name):
import argparse
sys_file = util.get_sys_file(host_arch, script_name)
usage_str = "kslinstall [options] install_file\nSee %s for default options" % (sys_file)
parser = argparse.ArgumentParser(usage=usage_str)
parser.add_argument('install_file', type=str, nargs='?', help='install_file specifying install', default='')
og = parser.add_argument_group("General ")
og.add_argument("--version",
action="store_true", default=False,
help="print version string and exit")
og.add_argument("-v", "--verbose",
action="store_true",
help="print informational messages to stdout")
og.add_argument("--dry_run", action="store_true",
help="perform a dry run (don't actually execute commands)")
og.add_argument("-f", "--force",
action="store_true",
help="clobber target directories and files")
og.add_argument("-k", "--keep_going",
action="store_false", dest="errors_fatal",
help="Do not abort if any variant builds fail")
og.add_argument("-z", "--interactive",
action="store_true", dest="interactive",
help="Pause for confirmation on each job step")
og.add_argument('-n', "--no_install",
action="store_false", dest="do_install",
help="Skip build/install steps (install module files only)")
og.add_argument('-m', "--no_module",
action="store_false", dest="do_module",
help="Skip installing modulefiles (build/install steps only)")
og.add_argument("-c", "--module_cmd",
action="store", dest="module_cmd",
help="module command")
og = parser.add_argument_group("Input Paths ")
og.add_argument("--source_paths",
action="store", dest="source_paths", type=str,
help="source paths to search ")
og.add_argument("--overlay_paths",
action="store", dest="overlay_paths", type=str,
help="overlay paths to search")
og.add_argument("--patch_paths",
action="store", dest="patch_paths", type=str,
help="patch paths to search")
og.add_argument("-t", "--module_template",
action="store", dest="module_template", type=str,
help="template module file ")
og = parser.add_argument_group("Output Paths ")
og.add_argument("-b", "--build_dir",
action="store", dest="build_dir", type=str,
help="working directory for builds")
og.add_argument("--root_install_dir",
action="store", dest="root_install_dir", type=str,
help="root directory to install packages to")
og.add_argument("--root_module_dir",
action="store", dest="root_module_dir", type=str,
help="root module directory to install modulefiles to")
return parser
def setup_installer_log(options):
if options.verbose:
logging.basicConfig(level=logging.INFO,
format='%(message)s')
else:
logging.basicConfig(level=logging.WARNING,
format='%(message)s')
# ten backup files at 100MB for a total of ~1GB potential logs
syslog = logging.handlers.RotatingFileHandler('/opt/share/ksl/system/logs/ksl_install.log',
mode='a',
maxBytes=104857600,
backupCount=10)
syslog.setLevel(logging.DEBUG)
syslogf = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s', '%m-%d %H:%M')
syslog.setFormatter(syslogf)
logging.getLogger('ksl.installer').addHandler(syslog)
def setup_target_dir(variant, options):
variant.target_dir = os.path.join(
variant.root_install_dir,variant.name,variant.version,variant.target_arch+variant.tag)
def setup_module_dir(variant, options):
log = logging.getLogger('ksl.installer')
variant_modules_dir = os.path.join(options.root_module_dir, variant.target_arch, variant.name)
if not os.path.exists(variant_modules_dir):
log.info("creating empty directory %s" % variant_modules_dir)
os.makedirs(variant_modules_dir)
module_name = variant.version+variant.tag
if variant.tag == "":
version_file = os.path.join(variant_modules_dir,'.version')
if os.path.exists(version_file):
if not options.force:
print("Can I clobber module default version file %s? [y/n]: " % version_file)
confirmation = getch()
if confirmation != 'y':
raise Exception(
"unwilling to clobber version file %s" % version_file)
log.info("deleting version file %s" % version_file)
os.remove(version_file)
log.info("setting default module version for %s on %s to %s " %
(variant.name, variant.target_arch, variant.version))
file_handle = open(version_file, 'w')
file_handle.write('#%%Module1.0\nset ModulesVersion "%s"\n' % variant.version)
file_handle.close()
else:
log.info("installing non-default variant to module %s" % module_name)
variant.module_file = os.path.join(variant_modules_dir, module_name)
def clobber_dir(variant, clobber_dir, options):
log = logging.getLogger('ksl.installer')
if os.path.exists(clobber_dir):
if not options.force:
print("Can I clobber directory %s? [y/n]: " % clobber_dir)
confirmation = getch()
if confirmation != 'y':
raise Exception(
"unwilling to clobber directory %s" % clobber_dir)
log.info("clobbering directory %s" % clobber_dir)
shutil.rmtree(clobber_dir)
log.info("creating empty directory %s" % clobber_dir)
os.makedirs(clobber_dir)
def setup_package_logs(variant):
from datetime import datetime
root = '/opt/share/ksl/system/logs/installs'
prefix = variant.name+'-'+variant.version+'-'+variant.release+'-'+str(datetime.date(datetime.today()))+"-"+str(datetime.time(datetime.now()))+'_'+variant.target_arch+variant.tag
logf = logging.Formatter('%(asctime)s %(name)s:\n%(message)s')
logname = os.path.join(root,prefix+'_install.log')
plog = logging.FileHandler(logname,'w')
plog.setFormatter(logf)
logging.getLogger('ksl.installer.package').addHandler(plog)
configurelog = logging.FileHandler(os.path.join(root,prefix+'_configure.log'),'w')
configurelog.setFormatter(logf)
    logging.getLogger('ksl.installer.package.configure').propagate = False
logging.getLogger('ksl.installer.package.configure').addHandler(configurelog)
patchlog = logging.FileHandler(os.path.join(root,prefix+'_patch.log'),'w')
patchlog.setFormatter(logf)
    logging.getLogger('ksl.installer.package.patch').propagate = False
logging.getLogger('ksl.installer.package.patch').addHandler(patchlog)
makelog = logging.FileHandler(os.path.join(root,prefix+'_make.log'),'w')
makelog.setFormatter(logf)
    logging.getLogger('ksl.installer.package.make').propagate = False
logging.getLogger('ksl.installer.package.make').addHandler(makelog)
log = logging.getLogger('ksl.installer')
log.info('logging install for build %s to %s' % (variant.target_arch+variant.tag, logname))
return [(plog, 'ksl.installer.package'),
(configurelog, 'ksl.installer.package.configure'),
(patchlog, 'ksl.installer.package.patch'),
(makelog, 'ksl.installer.package.make')]
def close_logs(logs):
for handle, logname in logs:
handle.flush()
handle.close()
logging.getLogger(logname).removeHandler(handle)
return
def parse_install_file(options):
log = logging.getLogger('ksl.installer')
install_file = options.install_file
if not os.path.exists(install_file):
raise Exception("couldn't locate install file: %s" % install_file)
log.info("parsing install file %s" % install_file)
exec(compile(open(install_file).read(), install_file, 'exec'), globals())
for variant in variants:
variant.root_install_dir = options.root_install_dir
variant.source_paths = options.source_paths
variant.patch_paths = options.patch_paths
variant.overlay_paths = options.overlay_paths
variant_build_path = variant.name+'-'+variant.version+variant.tag
variant.build_dir = os.path.join(options.build_dir,variant_build_path)
variant.module_cmd = options.module_cmd
variant.module_template = options.module_template
return variants
def run():
    try:
        main()
    except SystemExit:
        raise
    except KeyboardInterrupt:
        print("""
================================================================================
|| *Interrupted by user* ||
================================================================================
""")
        raise
    except:
        print("""
================================================================================
|| *There was some sort of error running the script* ||
|| Please report to Aron Ahmadia <aron.ahmadia@kaust.edu.sa> ||
================================================================================
Error stack follows
""")
        raise

if __name__ == "__main__":
    run()
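# --- Illustrative sketch (not part of kslinstall itself) ----------------------
# main() above layers its options in two argparse passes: per-site defaults are
# read from a config file, turned into '--arg=value' strings and parsed first,
# then the real command line is parsed *into that namespace*, so explicit flags
# override the file values.  A minimal stand-alone version of the pattern
# (option names and the file_pairs format are hypothetical):
import argparse

def parse_layered(file_pairs, argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--build_dir', default='/tmp/build')
    parser.add_argument('-v', '--verbose', action='store_true')
    # Pass 1: turn ('build_dir', '/scratch') pairs into '--build_dir=/scratch'.
    file_options = parser.parse_args(['--%s=%s' % (k, v) for k, v in file_pairs])
    # Pass 2: the command line wins because it is parsed last, writing into the
    # namespace that already holds the file defaults.
    return parser.parse_args(argv, namespace=file_options)

# parse_layered([('build_dir', '/scratch')], ['-v']) -> build_dir '/scratch', verbose True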
|
|
from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, six.string_types):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
@deconstructible
class RegexValidator(object):
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, six.string_types):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
        Validate that the input contains a match for the regular expression
        when inverse_match is False, and does not contain one when
        inverse_match is True; otherwise raise ValidationError.
"""
if not (self.inverse_match is not bool(self.regex.search(
force_text(value)))):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
def __ne__(self, other):
return not (self == other)
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must be a unicode string, not a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
# Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
tld_re = r'\.(?:[a-z' + ul + r']{2,63}|xn--[a-z0-9]{1,59})\.?'
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
def __init__(self, schemes=None, **kwargs):
super(URLValidator, self).__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
value = force_text(value)
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super(URLValidator, self).__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
scheme, netloc, path, query, fragment = urlsplit(value)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, code=self.code)
url = value
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
# one byte for the length of the name and one byte for the trailing dot
# that's used to indicate absolute names in DNS.
if len(urlsplit(value).netloc) > 253:
raise ValidationError(self.message, code=self.code)
integer_validator = RegexValidator(
_lazy_re_compile('^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator(object):
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
value = force_text(value)
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.validate_domain_part(domain_part):
return
except UnicodeError:
pass
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z', re.U)
validate_unicode_slug = RegexValidator(
slug_unicode_re,
_("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."),
'invalid'
)
ipv4_re = _lazy_re_compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z')
validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
Depending on the given parameters returns the appropriate validators for
the GenericIPAddressField.
    This code is here because it is exactly the same for the model and the form field.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
def int_list_validator(sep=',', message=None, code='invalid'):
regexp = _lazy_re_compile('^\d+(?:%s\d+)*\Z' % re.escape(sep))
return RegexValidator(regexp, message=message, code=code)
validate_comma_separated_integer_list = int_list_validator(
message=_('Enter only digits separated by commas.'),
)
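# A quick illustration (not part of this module): int_list_validator() builds a
# RegexValidator for "digits separated by <sep>", so for example:
#   validate_comma_separated_integer_list('1,2,3')   # passes silently
#   validate_comma_separated_integer_list('1,,2')    # raises ValidationError
#   int_list_validator(sep=';')('10;20')             # custom separator, passes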
@deconstructible
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
(self.limit_value == other.limit_value) and
(self.message == other.message) and
(self.code == other.code)
)
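# Subclasses only override `compare`, `clean`, `message`, and `code`; for example,
# MaxValueValidator(10)(11) raises because compare(11, 10) -> 11 > 10 is True,
# while MaxValueValidator(10)(9) returns silently.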
@deconstructible
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
@deconstructible
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
@deconstructible
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = ungettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
@deconstructible
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = ungettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
@deconstructible
class DecimalValidator(object):
"""
Validate that the input does not exceed the maximum number of digits
expected; otherwise, raise ValidationError.
"""
messages = {
'max_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'
),
'max_decimal_places': ungettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'
),
'max_whole_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'
),
}
def __init__(self, max_digits, decimal_places):
self.max_digits = max_digits
self.decimal_places = decimal_places
def __call__(self, value):
digit_tuple, exponent = value.as_tuple()[1:]
decimals = abs(exponent)
# digit_tuple doesn't include any leading zeros.
digits = len(digit_tuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None
and whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.max_digits == other.max_digits and
self.decimal_places == other.decimal_places
)
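# Worked example: Decimal('0.034').as_tuple() gives digit_tuple (3, 4) and exponent -3,
# so decimals = 3; since decimals > len(digit_tuple), digits is bumped to 3 and
# whole_digits = 0, so DecimalValidator(max_digits=4, decimal_places=3) accepts it.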
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
from tensorboardX import SummaryWriter
dummy_input = (torch.zeros(1, 3),)
class LinearInLinear(nn.Module):
def __init__(self):
super(LinearInLinear, self).__init__()
self.l = nn.Linear(3, 5)
def forward(self, x):
return self.l(x)
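# add_graph(model, input_to_model, verbose) traces the module with the dummy input
# and writes the resulting graph to the event file; the trailing True below turns on
# verbose printing of the traced graph.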
with SummaryWriter(comment='LinearInLinear') as w:
w.add_graph(LinearInLinear(), dummy_input, True)
class MultipleInput(nn.Module):
def __init__(self):
super(MultipleInput, self).__init__()
self.Linear_1 = nn.Linear(3, 5)
def forward(self, x, y):
return self.Linear_1(x+y)
with SummaryWriter(comment='MultipleInput') as w:
w.add_graph(MultipleInput(), (torch.zeros(1, 3), torch.zeros(1, 3)), True)
class MultipleOutput(nn.Module):
def __init__(self):
super(MultipleOutput, self).__init__()
self.Linear_1 = nn.Linear(3, 5)
self.Linear_2 = nn.Linear(3, 7)
def forward(self, x):
return self.Linear_1(x), self.Linear_2(x)
with SummaryWriter(comment='MultipleOutput') as w:
w.add_graph(MultipleOutput(), dummy_input, True)
class MultipleOutput_shared(nn.Module):
def __init__(self):
super(MultipleOutput_shared, self).__init__()
self.Linear_1 = nn.Linear(3, 5)
def forward(self, x):
return self.Linear_1(x), self.Linear_1(x)
with SummaryWriter(comment='MultipleOutput_shared') as w:
w.add_graph(MultipleOutput_shared(), dummy_input, True)
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
def forward(self, x):
return x * 2
model = SimpleModel()
dummy_input = (torch.zeros(1, 2, 3),)
with SummaryWriter(comment='constantModel') as w:
w.add_graph(model, dummy_input, True)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
# self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = F.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = F.relu(out)
return out
dummy_input = torch.rand(1, 3, 224, 224)
with SummaryWriter(comment='basicblock') as w:
model = BasicBlock(3, 3)
w.add_graph(model, (dummy_input, ), verbose=True)
class Net1(nn.Module):
def __init__(self):
super(Net1, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
self.bn = nn.BatchNorm2d(20)
def forward(self, x):
x = F.max_pool2d(self.conv1(x), 2)
x = F.relu(x) + F.relu(-x)
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = self.bn(x)
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
x = F.softmax(x, dim=1)
return x
class Net2(nn.Module):
def __init__(self):
super(Net2, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
x = F.log_softmax(x, dim=1)
return x
dummy_input = Variable(torch.rand(13, 1, 28, 28))
model = Net1()
with SummaryWriter(comment='Net1') as w:
w.add_graph(model, (dummy_input, ))
model = Net2()
with SummaryWriter(comment='Net2') as w:
w.add_graph(model, (dummy_input, ))
class SiameseNetwork(nn.Module):
def __init__(self):
super(SiameseNetwork, self).__init__()
self.cnn1 = Net1()
def forward_once(self, x):
output = self.cnn1(x)
return output
def forward(self, input1, input2):
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
model = SiameseNetwork()
with SummaryWriter(comment='SiameseNetwork') as w:
w.add_graph(model, (dummy_input, dummy_input))
dummy_input = torch.Tensor(1, 3, 224, 224)
with SummaryWriter(comment='alexnet') as w:
model = torchvision.models.alexnet()
w.add_graph(model, (dummy_input, ))
with SummaryWriter(comment='vgg19') as w:
model = torchvision.models.vgg19()
w.add_graph(model, (dummy_input, ))
with SummaryWriter(comment='densenet121') as w:
model = torchvision.models.densenet121()
w.add_graph(model, (dummy_input, ))
with SummaryWriter(comment='resnet18') as w:
model = torchvision.models.resnet18()
w.add_graph(model, (dummy_input, ))
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(
n_categories +
input_size +
hidden_size,
hidden_size)
self.i2o = nn.Linear(
n_categories +
input_size +
hidden_size,
output_size)
self.o2o = nn.Linear(hidden_size + output_size, output_size)
self.dropout = nn.Dropout(0.1)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, category, input, hidden):
input_combined = torch.cat((category, input, hidden), 1)
hidden = self.i2h(input_combined)
output = self.i2o(input_combined)
output_combined = torch.cat((hidden, output), 1)
output = self.o2o(output_combined)
output = self.dropout(output)
output = self.softmax(output)
return output, hidden, input
def initHidden(self):
return torch.zeros(1, self.hidden_size)
n_letters = 100
n_hidden = 128
n_categories = 10
rnn = RNN(n_letters, n_hidden, n_categories)
cat = torch.Tensor(1, n_categories)
dummy_input = torch.Tensor(1, n_letters)
hidden = torch.Tensor(1, n_hidden)
out, hidden, input = rnn(cat, dummy_input, hidden)
with SummaryWriter(comment='RNN') as w:
w.add_graph(rnn, (cat, dummy_input, hidden), verbose=False)
lstm = torch.nn.LSTM(3, 3) # Input dim is 3, output dim is 3
inputs = [torch.randn(1, 3) for _ in range(5)] # make a sequence of length 5
# initialize the hidden state.
hidden = (torch.randn(1, 1, 3),
torch.randn(1, 1, 3))
for i in inputs:
out, hidden = lstm(i.view(1, 1, -1), hidden)
with SummaryWriter(comment='lstm') as w:
w.add_graph(lstm, (torch.randn(1, 3).view(1, 1, -1), hidden), verbose=True)
import pytest
print('expect error here:')
with pytest.raises(Exception) as e_info:
dummy_input = torch.rand(1, 1, 224, 224)
with SummaryWriter(comment='basicblock_error') as w:
w.add_graph(model, (dummy_input, )) # error
|
|
# ======================================================================================================================
# Libraries and Modules
# Core modules
import numpy as np
import matplotlib.pyplot as plt
import illustris_python as ilpy
import translate_coordinates as tc #renato's code for camera projections
import tng_api_utils as tau
# Used only sparingly (maybe remove dependencies?)
import os
import astropy.io.ascii as ascii
import astropy
import astropy.io.fits as fits
import astropy.units as u
from astropy.cosmology import WMAP7,z_at_value
import copy
# Constants
ilh = tau.tngh # Little H (H_0/100) is set to 0.704
illcos = tau.tngcos # Our cosmology is taken from astropy.
# It uses astropy.cosmology.FlatLambdaCDM(H0=70.4,Om0=0.2726,Ob0=0.0456)
#======================================================================================================================
class lightcone_catalog:
# This class holds the lightcone geometry and galaxy selection parameters read from the lightcone definition file
def __init__(self,lightconefile,base_dir,mass_limit=(10.0**9.5),sfr_limit=0.0,mag_limit=None):
lc_data = ascii.read(lightconefile)
print("Initializing Lightcone File: ", lightconefile)
print(lc_data)
self.lightconefile = lightconefile
self.cylinder_number = np.int32(lc_data['col1'].data)
self.snapshot_string = lc_data['col2'].data
self.snapshot_redshift = lc_data['col3'].data
self.v_Ingress_x_cmh = lc_data['col4'].data
self.v_Ingress_y_cmh = lc_data['col5'].data
self.v_Ingress_z_cmh = lc_data['col6'].data
self.v_Egress_x_cmh = lc_data['col7'].data
self.v_Egress_y_cmh = lc_data['col8'].data
self.v_Egress_z_cmh = lc_data['col9'].data
self.v_Ingress_x_kpc = lc_data['col10'].data
self.v_Ingress_y_kpc = lc_data['col11'].data
self.v_Ingress_z_kpc = lc_data['col12'].data
self.v_Camera_x_kpc = lc_data['col13'].data
self.v_Camera_y_kpc = lc_data['col14'].data
self.v_Camera_z_kpc = lc_data['col15'].data
self.v_Offset_x_kpc = lc_data['col16'].data
self.v_Offset_y_kpc = lc_data['col17'].data
self.v_Offset_z_kpc = lc_data['col18'].data
self.fov_kpc = lc_data['col19'].data
self.center_redshift = lc_data['col20'].data
self.radius_buffer_cmh = lc_data['col21'].data
xs = None
xd = None
self.L_comoving = None
lines = open(lightconefile,'r')
for l in lines:
if "Comoving Single Box L" in l:
self.L_comoving = np.float32(l.split()[-1])
self.L_comovingh = round(self.L_comoving*ilh,4)
if "Delta Unit Vector" in l:
ss = l.split("[")[-1].split("]")[0].split()
xs = ss[0]
ys = ss[1]
zs = ss[2]
if "Direction Unit Vector" in l:
ss = l.split("[")[-1].split("]")[0].split()
xd = ss[0]
yd = ss[1]
zd = ss[2]
if "del B" in l:
self.delb_arcmin = np.float32(l.split()[-1])
if "del A" in l:
self.dela_arcmin = np.float32(l.split()[-1])
lines.close()
assert xs is not None
assert xd is not None
assert self.L_comoving is not None
#camdir
#just the direction unit vector from the lightcone file
self.camdir_x = np.float32(xd)
self.camdir_y = np.float32(yd)
self.camdir_z = np.float32(zd)
#camup
#just the delta unit vector from the lightcone file
self.camup_x = np.float32(xs)
self.camup_y = np.float32(ys)
self.camup_z = np.float32(zs)
print(" Direction vector: ", self.camdir_x, self.camdir_y, self.camdir_z)
print(" Up vector: ", self.camup_x, self.camup_y, self.camup_z)
print(" B FOV, arcmin: ", self.delb_arcmin)
print(" A FOV, arcmin: ", self.dela_arcmin)
print(" Ls, Mpc: ", self.L_comoving, self.L_comovingh)
self.norm_degrees = self.delb_arcmin/60.0
# This initialization line isn't needed since we're now writing to a file while we do our analysis
#self.cylinder_object_list = []
self.base_dir = base_dir
self.mass_limit = mass_limit
self.sfr_limit = sfr_limit
self.mag_limit = mag_limit
return
#0 - gas
#1 - DM
#4 - stars & WIND
#5 - BHs
def process_lightcone(self,minz=0.0,maxz=20.0,outfile='output.txt'):
# Initialize values
cmd_total = 0.0
cmx = 0.0
cmy = 0.0
cmz = 0.0
# Open file for writing
print(" Opening file for saving: ", outfile)
fobj = open(outfile,'w')
fobj.write('## Lightcone Catalog File for input geometry: '+self.lightconefile+'\n')
fobj.write('## Catalog source directory: '+self.base_dir+'\n')
fobj.write('## Square FOV (arcmin): {:12.6f}'.format(self.delb_arcmin)+'\n')
fobj.write('## Area (arcmin^2): {:12.6f}'.format(self.delb_arcmin**2)+'\n')
fobj.write('## Baryonic Mass Lower Limit (Msun) : {:10.5e}'.format(self.mass_limit)+'\n')
fobj.write('## Assumed Cosmology: '+WMAP7.__str__()+'\n')
fobj.write('## Creator: Teddy Pena (STScI) \n')
fobj.write('## Catalog & Data Release Reference: Nelson et al. (2019) \n')
fobj.write('## Catalog & Data Release URL: tng-project.org/data \n')
fobj.write('## Column 01: Snapshot number \n')
fobj.write('## Column 02: Subhalo Index \n')
fobj.write('## Column 03: RA (degrees) \n')
fobj.write('## Column 04: DEC (degrees) \n')
fobj.write('## Column 05: RA (proper kpc at true z) \n')
fobj.write('## Column 06: DEC (proper kpc at true z) \n')
fobj.write('## Column 07: RA (proper kpc at inferred z) \n')
fobj.write('## Column 08: DEC (proper kpc at inferred z) \n')
fobj.write('## Column 09: True cosmological redshift \n')
fobj.write('## Column 10: Inferred redshift (includes peculiar v) \n')
fobj.write('## Column 11: Peculiar redshift; Peculiar Velocity / Speed of Light \n')
fobj.write('## Column 12: True scale at cosmological z, in kpc/arcsec \n')
fobj.write('## Column 13: [Mpc] Comoving X in Observer Coordinates \n')
fobj.write('## Column 14: [Mpc] Comoving Y in Observer Coordinates \n')
fobj.write('## Column 15: [Mpc] Comoving Z in Observer Coordinates \n')
fobj.write('## Column 16: [Mpc] True Angular Diameter Distance to observer \n')
fobj.write('## Column 17: [Mpc] Inferred Angular Diameter Distance to observer \n')
fobj.write('## Column 18: Snapshot redshift \n')
fobj.write('## Column 19: Geometrically appropriate redshift at center of this cylinder \n')
fobj.write('## Column 20: Lightcone cylinder number \n')
fobj.write('## Column 21: [Msun] Stellar mass within 2X stellar half mass radius\n')
fobj.write('## Column 22: [Msun] Total gas mass within 2X stellar half mass radius\n')
fobj.write('## Column 23: [Msun] Total mass of this subhalo (excludes children subhalos) \n')
fobj.write('## Column 24: [Msun] Total BH mass within 2X stellar half mass radius\n')
fobj.write('## Column 25: [Msun] Total baryon mass within 2X stellar half mass radius\n')
fobj.write('## Column 26: [Msun/year] SFR within 2X stellar half mass radius\n')
fobj.write('## Column 27: [(10^10 Msun/h) / (0.978 Gyr/h)] Total BH accretion rate within subhalo\n')
fobj.write('## Column 28: [Mpc] Camera X in Observer Coordinates (Proper X at z; a transverse coordinate) \n')
fobj.write('## Column 29: [Mpc] Camera Y in Observer Coordinates (Proper Y at z; a transverse coordinate)\n')
fobj.write('## Column 30: [Mpc] Camera Z in Observer Coordinates (Proper Z at z; should be almost exactly Column 16)\n')
fobj.write('## Column 31: [AB Mag] Intrinsic stellar g absolute magnitude (BC03) \n')
fobj.write('## Column 32: [AB Mag] Intrinsic stellar r absolute magnitude (BC03) \n')
fobj.write('## Column 33: [AB Mag] Intrinsic stellar i absolute magnitude (BC03) \n')
fobj.write('## Column 34: [AB Mag] Intrinsic stellar z absolute magnitude (BC03) \n')
fobj.write('## Column 35: [km/s] Galaxy motion in transverse Camera X direction \n')
fobj.write('## Column 36: [km/s] Galaxy motion in transverse Camera Y direction \n')
fobj.write('## Column 37: [km/s] Galaxy motion in line-of-sight Camera Z direction ; the Peculiar Velocity \n')
fobj.write('## Column 38: [km/s] Cosmological expansion velocity at true z (Column 10 measures Column 37+38)\n')
fobj.write('## Column 39: [AB Mag] Apparent total rest-frame g-band magnitude (BC03) \n')
for i,cyl in enumerate(self.cylinder_number):
cmd_thiscyl = ( (self.v_Egress_x_cmh[i]/ilh - self.v_Ingress_x_cmh[i]/ilh)**2 + (self.v_Egress_y_cmh[i]/ilh - self.v_Ingress_y_cmh[i]/ilh)**2 + (self.v_Egress_z_cmh[i]/ilh - self.v_Ingress_z_cmh[i]/ilh)**2 )**0.5
cmd_begin = cmd_total
cmd_end = cmd_begin + cmd_thiscyl
cmd_total = cmd_end
cz=self.center_redshift[i]
#world coordinates of ingress point
cmx_begin = 1.0*cmx
cmy_begin = 1.0*cmy
cmz_begin = 1.0*cmz
#world coordinates of egress points
cmx = cmx_begin + (self.v_Egress_x_cmh[i]/ilh - self.v_Ingress_x_cmh[i]/ilh)
cmy = cmy_begin + (self.v_Egress_y_cmh[i]/ilh - self.v_Ingress_y_cmh[i]/ilh)
cmz = cmz_begin + (self.v_Egress_z_cmh[i]/ilh - self.v_Ingress_z_cmh[i]/ilh)
if i > 1000:
continue
if cz < minz:
continue
if cz > maxz:
continue
testf = 'test_'+str(cyl)+'.pdf'
f1 = plt.figure(figsize=(10.5,10.5), dpi=150)
plt.subplots_adjust(left=0.11, right=0.98, bottom=0.08, top=0.99,wspace=0.25,hspace=0.25)
skip = 500
#determine snapshot of interest
print("Processing Cylinder: ", cyl, i, self.snapshot_redshift[i])
snapnum = self.snapshot_string[i]
# Not relevant for us because we're doing tng simulations
#old corrupt snaps for illustris-1
#if snapnum==53:
# snapnum=52
#if snapnum==55:
# snapnum=54
print(" Snapshot Number: ", snapnum)
# Load the data
fields=['SubhaloMass','SubhaloMassInMaxRad','SubhaloMassInRadType','SubhaloMassInMaxRadType','SubhaloPos','SubhaloSFR','SubhaloSFRinRad','SubhaloVel','SubhaloBHMass','SubhaloBHMdot','SubhaloStellarPhotometrics','SubhaloWindMass']
subhalos = ilpy.groupcat.loadSubhalos(self.base_dir,snapnum,fields=fields)
print(" Loaded subhalos: ", subhalos['count'], subhalos['SubhaloMassInRadType'].shape)
# Clean the loaded subhalo dictionaries
mstar_msun = subhalos['SubhaloMassInRadType'][:,4]*(1.0e10)/ilh
mgas_msun = subhalos['SubhaloMassInRadType'][:,0]*(1.0e10)/ilh #includes wind mass
mbh_msun = subhalos['SubhaloMassInRadType'][:,5]*(1.0e10)/ilh
baryonmass_msun = mstar_msun + mgas_msun + mbh_msun #within 2x stellar half mass radius... best?
mhalo_msun = subhalos['SubhaloMass']*(1.0e10)/ilh
sfr = subhalos['SubhaloSFR']*1.0
gmag_ABabs=subhalos['SubhaloStellarPhotometrics'][:,4]*1.0
distmod=illcos.distmod(cz).value
gmag=gmag_ABabs+distmod
if self.mag_limit is None:
mi = np.where(np.logical_and(baryonmass_msun > self.mass_limit, sfr >self.sfr_limit))[0]
else:
mi = np.where(np.logical_and(gmag < self.mag_limit,baryonmass_msun > 0.0))[0]
if mi.shape[0]==0:
# No galaxies pass the selection in this cylinder; close the figure and move on.
plt.close(f1)
continue
print(" Selected number: ", mi.shape)
print(" Mstar statistics: ", np.min(mstar_msun[mi]), np.max(mstar_msun[mi]), np.median(mstar_msun[mi]))
print(" Mgas statistics: ", np.min(mgas_msun[mi]), np.max(mgas_msun[mi]), np.median(mgas_msun[mi]))
print(" Mag statistics : ", np.min(gmag[mi]), np.max(gmag[mi]), np.median(gmag[mi]))
for key in subhalos.keys():
if key == 'count':
continue
filtered_data = subhalos[key][mi]
subhalos[key] = filtered_data
# Now, periodicize
subhalos = self.periodicize(subhalos,self.L_comovingh*1000.0)
xpos = subhalos['SubhaloPos'][:,0] #in cKpc/h of max bound part
ypos = subhalos['SubhaloPos'][:,1]
zpos = subhalos['SubhaloPos'][:,2]
#project geometry
#campos
#in phys kpc, offset values from lightcone file!
xoff = self.v_Offset_x_kpc[i]
yoff = self.v_Offset_y_kpc[i]
zoff = self.v_Offset_z_kpc[i]
#the position here I think doesn't matter???
camera = tc.Camera([0,0,0],[self.camdir_x,self.camdir_y,self.camdir_z],[self.camup_x,self.camup_y,self.camup_z])
#galaxy world position
#convert to phys kpc following Renato's lead in translate_coordinates.py
#note there's an extra translation in the sunrise calcs, so we can discard that here
#box coordinates relative to ingress coordinate
boxX = (xpos/ilh) - self.v_Ingress_x_cmh[i]/ilh
boxY = (ypos/ilh) - self.v_Ingress_y_cmh[i]/ilh
boxZ = (zpos/ilh) - self.v_Ingress_z_cmh[i]/ilh
axi = f1.add_subplot(2,2,1)
axi.set_ylabel('boxI X',size=7,labelpad=1)
axi.set_xlabel('boxI Z',size=7,labelpad=1)
axi.tick_params(axis='both',which='major',labelsize=7)
axi.plot(boxZ[::skip],boxX[::skip],'ok')
#add box coordinate to world coordinate of ingress point
worldX = boxX+cmx_begin
worldY = boxY+cmy_begin
worldZ = boxZ+cmz_begin
axi = f1.add_subplot(2,2,2)
axi.set_ylabel('world X',size=7,labelpad=1)
axi.set_xlabel('world Z',size=7,labelpad=1)
axi.tick_params(axis='both',which='major',labelsize=7)
axi.plot(worldZ[::skip],worldX[::skip],'ok')
axi.plot([np.min(worldZ),np.max(worldZ)],[cmx_begin,cmx_begin],color='red')
velX = subhalos['SubhaloVel'][:,0]
velY = subhalos['SubhaloVel'][:,1]
velZ = subhalos['SubhaloVel'][:,2]
#galaxy cam position, in comoving kpc
galaxy_camera_posx,galaxy_camera_posy,galaxy_camera_posz = camera.cameraCoordinates_vector(worldX,worldY,worldZ)
galaxy_camera_velx,galaxy_camera_vely,galaxy_camera_velz = camera.cameraCoordinates_vector(velX,velY,velZ)
axi = f1.add_subplot(2,2,3)
axi.set_ylabel('cam X',size=7,labelpad=1)
axi.set_xlabel('cam Z',size=7,labelpad=1)
axi.tick_params(axis='both',which='major',labelsize=7)
axi.plot(galaxy_camera_posz[::skip],galaxy_camera_posx[::skip],'ok')
#galaxy projection using spherical coords
y1 = np.arctan2(galaxy_camera_posx,galaxy_camera_posz)/(0.5*(self.delb_arcmin/60.0)*(np.pi/180.0))
y2 = np.arctan2(galaxy_camera_posy,galaxy_camera_posz)/(0.5*(self.delb_arcmin/60.0)*(np.pi/180.0))
#y1 and y2 are normalized angular coordinates: the range [-1,1] spans the full square FOV, i.e. self.norm_degrees degrees on a side
axi = f1.add_subplot(2,2,4)
axi.set_ylabel('cam Y1',size=7,labelpad=1)
axi.set_xlabel('cam Y2',size=7,labelpad=1)
axi.set_xlim(-3,3)
axi.set_ylim(-3,3)
axi.tick_params(axis='both',which='major',labelsize=7)
axi.plot(y1,y2,'ok',markersize=0.5,mew=0.0)
axi.plot([-1,-1],[-1,1],color='red')
axi.plot([-1,1],[-1,-1],color='red')
axi.plot([1,1],[-1,1],color='red')
axi.plot([-1,1],[1,1],color='red')
#all values correspond to mi vector
#cull by RA, DEC, and segment length
ci = np.where(np.logical_and(np.logical_and(np.logical_and(np.abs(y1) <= 1.0, np.abs(y2) <= 1.0),galaxy_camera_posz <= cmd_end),galaxy_camera_posz > cmd_begin))[0]
print(" Selected N galaxies in FOV: ", ci.shape)
axi.plot(y1[ci],y2[ci],'or',markersize=0.7,mew=0.0)
RA_deg = y1[ci]*self.norm_degrees/2.0
DEC_deg = y2[ci]*self.norm_degrees/2.0
#save interesting quantities
if ci.shape[0] > 0:
print(cyl, cmd_begin, np.min(galaxy_camera_posz[ci]))
print(cyl, cmd_end, np.max(galaxy_camera_posz[ci]))
cylinder_obj = cylinder_catalog(snapnum,subhalos,ci,RA_deg,DEC_deg, self.snapshot_redshift[i],
galaxy_camera_posx,galaxy_camera_posy,galaxy_camera_posz,self.center_redshift[i],
galaxy_camera_velx,galaxy_camera_vely,galaxy_camera_velz,cyl,gmag)
else:
cylinder_obj = None
#f1.savefig(testf)
plt.close(f1)
# Here we write this cylinder's galaxies to the file. Note that we're still in
# the for loop cycling through the snapshot number.
if cylinder_obj is not None:
cylinder_obj.print_cylinder(fobj)
fobj.close()
return
def periodicize(self,subhalos,boxL):
xpos = subhalos['SubhaloPos'][:,0].flatten() #in cKpc/h of max bound part
ypos = subhalos['SubhaloPos'][:,1].flatten()
zpos = subhalos['SubhaloPos'][:,2].flatten()
N = xpos.shape[0]
sid = np.arange(N)
new_subhalos = copy.copy(subhalos)
new_x = copy.copy(xpos)
new_y = copy.copy(ypos)
new_z = copy.copy(zpos)
new_subhalos['SubFindID'] = np.concatenate((sid,sid))
new_subhalos['SubFindID'] = np.concatenate((new_subhalos['SubFindID'],sid))
new_subhalos['SubFindID'] = np.concatenate((new_subhalos['SubFindID'],sid))
new_subhalos['SubFindID'] = np.concatenate((new_subhalos['SubFindID'],sid))
new_subhalos['SubFindID'] = np.concatenate((new_subhalos['SubFindID'],sid))
new_subhalos['SubFindID'] = np.concatenate((new_subhalos['SubFindID'],sid))
keys = subhalos.keys()
for key in keys:
if key=='SubhaloPos':
#special
#x repeat
new_x = np.concatenate((new_x,xpos+boxL))
new_x = np.concatenate((new_x,xpos-boxL))
new_y = np.concatenate((new_y,ypos))
new_y = np.concatenate((new_y,ypos))
new_z = np.concatenate((new_z,zpos))
new_z = np.concatenate((new_z,zpos))
#y repeat
new_x = np.concatenate((new_x,xpos))
new_x = np.concatenate((new_x,xpos))
new_y = np.concatenate((new_y,ypos+boxL))
new_y = np.concatenate((new_y,ypos-boxL))
new_z = np.concatenate((new_z,zpos))
new_z = np.concatenate((new_z,zpos))
#z repeat
new_x = np.concatenate((new_x,xpos))
new_x = np.concatenate((new_x,xpos))
new_y = np.concatenate((new_y,ypos))
new_y = np.concatenate((new_y,ypos))
new_z = np.concatenate((new_z,zpos+boxL))
new_z = np.concatenate((new_z,zpos-boxL))
new_pos = np.column_stack((new_x,new_y,new_z))
new_subhalos[key] = new_pos
elif key=='count':
new_subhalos[key] = 7*subhalos[key]
else:
new_subhalos[key] = np.concatenate((new_subhalos[key],subhalos[key]))
new_subhalos[key] = np.concatenate((new_subhalos[key],subhalos[key]))
new_subhalos[key] = np.concatenate((new_subhalos[key],subhalos[key]))
new_subhalos[key] = np.concatenate((new_subhalos[key],subhalos[key]))
new_subhalos[key] = np.concatenate((new_subhalos[key],subhalos[key]))
new_subhalos[key] = np.concatenate((new_subhalos[key],subhalos[key])) #7 total boxes
return new_subhalos
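# Sanity check on periodicize: an input dictionary describing N subhalos comes back
# describing 7*N entries -- the original box plus copies shifted by +/- boxL along
# each of the three axes -- with the 'count' field scaled to match.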
# Write to file code taken from here
# --------------------------------------------------------------------------------------------------------------------------------------------------
# def output_catalog(self,outfile):
# print(" Saving catalog: ", outfile)
#
# fobj = open(outfile,'w')
#
# fobj.write('## Lightcone Catalog File for input geometry: '+self.lightconefile+'\n')
# fobj.write('## Catalog source directory: '+self.base_dir+'\n')
# fobj.write('## Square FOV (arcmin): {:12.6f}'.format(self.delb_arcmin)+'\n')
# fobj.write('## Area (arcmin^2): {:12.6f}'.format(self.delb_arcmin**2)+'\n')
# fobj.write('## Baryonic Mass Lower Limit (Msun) : {:10.5e}'.format(self.mass_limit)+'\n')
# fobj.write('## Assumed Cosmology: '+WMAP7.__str__()+'\n')
# fobj.write('## Creator: Teddy Pena (STScI) \n')
# fobj.write('## Catalog & Data Release Reference: Nelson et al. (2019) \n')
# fobj.write('## Catalog & Data Release URL: tng-project.org/data \n')
# fobj.write('## Column 01: Snapshot number \n')
# fobj.write('## Column 02: Subhalo Index \n')
# fobj.write('## Column 03: RA (degrees) \n')
# fobj.write('## Column 04: DEC (degrees) \n')
# fobj.write('## Column 05: RA (proper kpc at true z) \n')
# fobj.write('## Column 06: DEC (proper kpc at true z) \n')
# fobj.write('## Column 07: RA (proper kpc at inferred z) \n')
# fobj.write('## Column 08: DEC (proper kpc at inferred z) \n')
# fobj.write('## Column 09: True cosmological redshift \n')
# fobj.write('## Column 10: Inferred redshift (includes peculiar v) \n')
# fobj.write('## Column 11: Peculiar redshift; Peculiar Velocity / Speed of Light \n')
# fobj.write('## Column 12: True scale at cosmological z, in kpc/arcsec \n')
# fobj.write('## Column 13: [Mpc] Comoving X in Observer Coordinates \n')
# fobj.write('## Column 14: [Mpc] Comoving Y in Observer Coordinates \n')
# fobj.write('## Column 15: [Mpc] Comoving Z in Observer Coordinates \n')
# fobj.write('## Column 16: [Mpc] True Angular Diameter Distance to observer \n')
# fobj.write('## Column 17: [Mpc] Inferred Angular Diameter Distance to observer \n')
# fobj.write('## Column 18: Snapshot redshift \n')
# fobj.write('## Column 19: Geometrically appropriate redshift at center of this cylinder \n')
# fobj.write('## Column 20: Lightcone cylinder number \n')
# fobj.write('## Column 21: [Msun] Stellar mass within 2X stellar half mass radius\n')
# fobj.write('## Column 22: [Msun] Total gas mass within 2X stellar half mass radius\n')
# fobj.write('## Column 23: [Msun] Total mass of this subhalo (excludes children subhalos) \n')
# fobj.write('## Column 24: [Msun] Total BH mass within 2X stellar half mass radius\n')
# fobj.write('## Column 25: [Msun] Total baryon mass within 2X stellar half mass radius\n')
# fobj.write('## Column 26: [Msun/year] SFR within 2X stellar half mass radius\n')
# fobj.write('## Column 27: [(10^10 Msun/h) / (0.978 Gyr/h)] Total BH accretion rate within subhalo\n')
# fobj.write('## Column 28: [Mpc] Camera X in Observer Coordinates (Proper X at z; a transverse coordinate) \n')
# fobj.write('## Column 29: [Mpc] Camera Y in Observer Coordinates (Proper Y at z; a transverse coordinate)\n')
# fobj.write('## Column 30: [Mpc] Camera Z in Observer Coordinates (Proper Z at z; should be almost exactly Column 16)\n')
# fobj.write('## Column 31: [AB Mag] Intrinsic stellar g absolute magnitude (BC03) \n')
# fobj.write('## Column 32: [AB Mag] Intrinsic stellar r absolute magnitude (BC03) \n')
# fobj.write('## Column 33: [AB Mag] Intrinsic stellar i absolute magnitude (BC03) \n')
# fobj.write('## Column 34: [AB Mag] Intrinsic stellar z absolute magnitude (BC03) \n')
# fobj.write('## Column 35: [km/s] Galaxy motion in transverse Camera X direction \n')
# fobj.write('## Column 36: [km/s] Galaxy motion in transverse Camera Y direction \n')
# fobj.write('## Column 37: [km/s] Galaxy motion in line-of-sight Camera Z direction ; the Peculiar Velocity \n')
# fobj.write('## Column 38: [km/s] Cosmological expansion velocity at true z (Column 10 measures Column 37+38)\n')
# fobj.write('## Column 39: [AB Mag] Apparent total rest-frame g-band magnitude (BC03) \n')
# for cylobj in self.cylinder_object_list:
#     if cylobj is not None:
#         cylobj.print_cylinder(fobj)
# fobj.close()
# return
class cylinder_catalog:
def __init__(self,snapnum,subhalos,ci,RA_deg,DEC_deg,snapz,galaxy_camera_posx,galaxy_camera_posy,galaxy_camera_posz,centerz,galaxy_camera_velx,galaxy_camera_vely,galaxy_camera_velz,cyl,gmag):
#fields=['SubhaloMass','SubhaloMassInMaxRad','SubhaloMassInRadType','SubhaloMassInMaxRadType','SubhaloPos','SubhaloSFR','SubhaloSFRinRad','SubhaloVel','SubhaloBHMass','SubhaloBHMdot','SubhaloStellarPhotometrics','SubhaloWindMass']
self.snapshot_number = snapnum + np.zeros_like(ci)
self.subhalo_index = subhalos['SubFindID'][ci]
self.RA_deg = RA_deg
self.DEC_deg = DEC_deg
self.snapz = snapz + np.zeros_like(RA_deg)
self.center_z = centerz + np.zeros_like(RA_deg)
self.cylinder_number = cyl + np.zeros_like(self.subhalo_index)
self.galaxy_comoving_x_mpc = galaxy_camera_posx[ci]/1000.0
self.galaxy_comoving_y_mpc = galaxy_camera_posy[ci]/1000.0
self.galaxy_comoving_z_mpc = galaxy_camera_posz[ci]/1000.0
#self.galaxy_camera_posx = galaxy_camera_posx[ci]/1000.0
#self.galaxy_camera_posy = galaxy_camera_posy[ci]/1000.0
#self.galaxy_camera_posz = galaxy_camera_posz[ci]/1000.0
self.galaxy_camera_velx = galaxy_camera_velx[ci]
self.galaxy_camera_vely = galaxy_camera_vely[ci]
self.galaxy_camera_velz = galaxy_camera_velz[ci]
self.galaxy_peculiar_vr = 1.0*self.galaxy_camera_velz
self.galaxy_peculiar_z = 1.0*self.galaxy_peculiar_vr/(astropy.constants.c.value/1.0e3)
self.cosmological_redshift = np.zeros_like(self.RA_deg)
for i,index in enumerate(ci):
self.cosmological_redshift[i] = np.float64(z_at_value(WMAP7.comoving_distance, self.galaxy_comoving_z_mpc[i]*u.megaparsec,ztol=1e-12,maxfun=2000))
self.hubble_velocity = self.cosmological_redshift*astropy.constants.c.value/1.0e3 #in km/s
self.galaxy_observed_z = 1.0*self.cosmological_redshift + self.galaxy_peculiar_z
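# This is the first-order combination z_obs ~ z_cos + v_pec/c; the exact relation
# (1 + z_obs) = (1 + z_cos)(1 + v_pec/c) differs only by the small cross term.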
#self.galaxy_comoving_x_mpc = self.galaxy_camera_posx*(1.0 + self.cosmological_redshift)
#self.galaxy_comoving_y_mpc = self.galaxy_camera_posy*(1.0 + self.cosmological_redshift)
#self.galaxy_comoving_z_mpc = self.galaxy_camera_posz*(1.0 + self.cosmological_redshift)
self.galaxy_camera_posx = self.galaxy_comoving_x_mpc/(1.0 + self.cosmological_redshift)
self.galaxy_camera_posy = self.galaxy_comoving_y_mpc/(1.0 + self.cosmological_redshift)
self.galaxy_camera_posz = self.galaxy_comoving_z_mpc/(1.0 + self.cosmological_redshift)
self.angdiam_mpc = np.asarray(WMAP7.angular_diameter_distance(self.cosmological_redshift))
self.kpc_per_arcsec = np.asarray(WMAP7.kpc_proper_per_arcmin(self.cosmological_redshift)/60.0)
self.observed_angdiam_mpc = np.asarray(WMAP7.angular_diameter_distance(self.galaxy_observed_z))
self.observed_comoving_mpc = np.asarray(WMAP7.comoving_distance(self.galaxy_observed_z))
self.observed_kpc_per_arcsec = np.asarray(WMAP7.kpc_proper_per_arcmin(self.galaxy_observed_z)/60.0)
self.RA_kpc = self.RA_deg*3600.0*self.kpc_per_arcsec
self.DEC_kpc = self.DEC_deg*3600.0*self.kpc_per_arcsec
self.observed_RA_kpc = self.RA_deg*3600.0*self.observed_kpc_per_arcsec
self.observed_DEC_kpc = self.DEC_deg*3600.0*self.observed_kpc_per_arcsec
self.mstar_msun = subhalos['SubhaloMassInRadType'][self.subhalo_index,4]*(1.0e10)/ilh
self.mgas_msun = subhalos['SubhaloMassInRadType'][self.subhalo_index,0]*(1.0e10)/ilh #includes wind mass
self.mbh_msun = subhalos['SubhaloMassInRadType'][self.subhalo_index,5]*(1.0e10)/ilh
self.mhalo_msun = subhalos['SubhaloMass'][self.subhalo_index]*(1.0e10)/ilh
self.baryonmass_msun = self.mstar_msun + self.mgas_msun + self.mbh_msun #within 2x stellar half mass radius... best?
self.xpos_ckh = subhalos['SubhaloPos'][self.subhalo_index,0] #in cKpc/h of max bound part
self.ypos_ckh = subhalos['SubhaloPos'][self.subhalo_index,1]
self.zpos_ckh = subhalos['SubhaloPos'][self.subhalo_index,2]
self.xpos_pmpc = (self.xpos_ckh*1.0/(1.0 + snapz )/ilh)/1.0e3
self.ypos_pmpc = (self.ypos_ckh*1.0/(1.0 + snapz )/ilh)/1.0e3
self.zpos_pmpc = (self.zpos_ckh*1.0/(1.0 + snapz )/ilh)/1.0e3
self.xvel_kms = subhalos['SubhaloVel'][self.subhalo_index,0]
self.yvel_kms = subhalos['SubhaloVel'][self.subhalo_index,1]
self.zvel_kms = subhalos['SubhaloVel'][self.subhalo_index,2]
self.sfr = subhalos['SubhaloSFRinRad'][self.subhalo_index]
self.bhmdot = subhalos['SubhaloBHMdot'][self.subhalo_index]
self.gmag = subhalos['SubhaloStellarPhotometrics'][self.subhalo_index,4]
self.rmag = subhalos['SubhaloStellarPhotometrics'][self.subhalo_index,5]
self.imag = subhalos['SubhaloStellarPhotometrics'][self.subhalo_index,6]
self.zmag = subhalos['SubhaloStellarPhotometrics'][self.subhalo_index,7]
self.gmag_apparent=gmag[ci]
#self.total_redshift =
return
def print_cylinder(self,outobj):
for i,shi in enumerate(self.subhalo_index):
thisline = '{:8d}{:12d} {:12.6f} {:12.6f} {:10.2f} {:10.2f} {:10.2f} {:10.2f} '\
'{:12.8f} {:12.8f} {:12.4e} {:8.4f} '\
'{:10.4f} {:10.4f} {:16.4f} {:16.4f} {:16.4f} {:12.8f} {:12.8f} {:8d}'\
'{:12.4e} {:12.4e} {:12.4e} {:12.4e} {:12.4e} {:16.4f} {:10.4e}'\
' {:10.4f} {:10.4f} {:16.4f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:12.4e} {:8.2f}'\
'\n'.format(self.snapshot_number[i],shi,self.RA_deg[i],self.DEC_deg[i],self.RA_kpc[i],self.DEC_kpc[i],self.observed_RA_kpc[i],self.observed_DEC_kpc[i],
self.cosmological_redshift[i],self.galaxy_observed_z[i],self.galaxy_peculiar_z[i],self.kpc_per_arcsec[i],
self.galaxy_comoving_x_mpc[i],self.galaxy_comoving_y_mpc[i],self.galaxy_comoving_z_mpc[i],self.angdiam_mpc[i],self.observed_angdiam_mpc[i],self.snapz[i],self.center_z[i],self.cylinder_number[i],
self.mstar_msun[i],self.mgas_msun[i],self.mhalo_msun[i],self.mbh_msun[i],self.baryonmass_msun[i],self.sfr[i],self.bhmdot[i],
self.galaxy_camera_posx[i],self.galaxy_camera_posy[i],self.galaxy_camera_posz[i],
self.gmag[i],self.rmag[i],self.imag[i],self.zmag[i],
self.galaxy_camera_velx[i],self.galaxy_camera_vely[i],self.galaxy_camera_velz[i],self.hubble_velocity[i],self.gmag_apparent[i])
outobj.write(thisline)
return
def process_lightcone_catalog(lightcone=None,base_dir=None,mass_limit=10.0**9.5,sfr_limit=0.0,mag_limit=None):
assert (lightcone is not None) and (base_dir is not None)
assert os.path.lexists(base_dir)
catalog_object = lightcone_catalog(lightcone,base_dir,mass_limit=mass_limit,sfr_limit=sfr_limit,mag_limit=mag_limit)
return catalog_object
if __name__=="__main__":
# Input parameters
magl=30
minz=0.1
maxz=8.8
catalog_xyz = process_lightcone_catalog(lightcone="./tng300_6_5_xyz.txt",base_dir='/home/tnguser/sims.TNG/TNG300-1/output/',mag_limit=magl)
# process_lightcone writes the catalog to 'outfile' as it runs and returns None,
# so there is no need to rebind catalog_xyz here.
catalog_xyz.process_lightcone(minz=minz,maxz=maxz, outfile='./Lightcone_TNG300-1_mag30_6_5_xyz.txt')
#catalog_xyz.output_catalog('./Lightcone_TNG300-1_mag30_6_5_xyz.txt')
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.special_math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.platform import test
class LBetaTest(test.TestCase):
_use_gpu = False
def test_one_dimensional_arg(self):
# Should evaluate to 1 and 1/2: exp(lbeta([1, 1])) = Gamma(1)Gamma(1)/Gamma(2) = 1
# and exp(lbeta([2, 1])) = Gamma(2)Gamma(1)/Gamma(3) = 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_one)).eval())
self.assertAllClose(
0.5, math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
def test_one_dimensional_arg_dynamic_alloc(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.test_session(use_gpu=self._use_gpu):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
self.assertAllClose(0.5, beta_ph.eval(feed_dict={ph: x_one_half}))
def test_two_dimensional_arg(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose(
[0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
def test_two_dimensional_arg_dynamic_alloc(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=self._use_gpu):
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose([0.5, 0.5], beta_ph.eval(feed_dict={ph: x_one_half}))
def test_two_dimensional_proper_shape(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose(
[0.5, 0.5], math_ops.exp(special_math_ops.lbeta(x_one_half)).eval())
self.assertEqual(
(2,), array_ops.shape(special_math_ops.lbeta(x_one_half)).eval())
self.assertEqual(
tensor_shape.TensorShape([2]),
special_math_ops.lbeta(x_one_half).get_shape())
def test_complicated_shape(self):
with self.test_session(use_gpu=self._use_gpu):
x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
self.assertAllEqual(
(3, 2), array_ops.shape(special_math_ops.lbeta(x)).eval())
self.assertEqual(
tensor_shape.TensorShape([3, 2]),
special_math_ops.lbeta(x).get_shape())
def test_length_1_last_dimension_results_in_one(self):
# If there is only one coefficient, the formula still works: lbeta([x]) reduces to
# log(Gamma(x)) - log(Gamma(x)) = 0, so exp(lbeta) is always one.
x_a = [5.5]
x_b = [0.1]
with self.test_session(use_gpu=self._use_gpu):
self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_a)).eval())
self.assertAllClose(1, math_ops.exp(special_math_ops.lbeta(x_b)).eval())
self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
def test_empty_rank2_or_greater_input_gives_empty_output(self):
with self.test_session(use_gpu=self._use_gpu):
self.assertAllEqual([], special_math_ops.lbeta([[]]).eval())
self.assertEqual((0,), special_math_ops.lbeta([[]]).get_shape())
self.assertAllEqual([[]], special_math_ops.lbeta([[[]]]).eval())
self.assertEqual((1, 0), special_math_ops.lbeta([[[]]]).get_shape())
def test_empty_rank2_or_greater_input_gives_empty_output_dynamic_alloc(self):
with self.test_session(use_gpu=self._use_gpu):
ph = array_ops.placeholder(dtypes.float32)
self.assertAllEqual(
[], special_math_ops.lbeta(ph).eval(feed_dict={ph: [[]]}))
self.assertAllEqual(
[[]], special_math_ops.lbeta(ph).eval(feed_dict={ph: [[[]]]}))
def test_empty_rank1_input_raises_value_error(self):
with self.test_session(use_gpu=self._use_gpu):
with self.assertRaisesRegexp(ValueError, 'rank'):
special_math_ops.lbeta([])
def test_empty_rank1_dynamic_alloc_input_raises_op_error(self):
with self.test_session(use_gpu=self._use_gpu):
ph = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError('rank'):
special_math_ops.lbeta(ph).eval(feed_dict={ph: []})
class LBetaTestGpu(LBetaTest):
_use_gpu = True
class EinsumTest(test.TestCase):
simple_cases = [
'ij,jk->ik',
'ijk,jklm->il',
'ij,jk,kl->il',
'ijk->i',
'ijk->kji',
'ji,kj->ik',
'ikl,kji->kl',
'klj,lki->ij',
'ijk,ilj->kli',
'kij,mkb->ijmb',
'ijk,ijl,ikl->i',
'i,ijk,j->k',
'ij,ij,jk,kl->il',
'ij,kj,il,jm->ml',
'a,ab,abc->abc',
'a,b,ab->ab',
'ab,ab,c->',
'ab,ab,c->c',
'ab,ab,cd,cd->',
'ab,ab,cd,cd->ac',
'ab,ab,cd,cd->cd',
'ab,ab,cd,cd,ef,ef->',
'ab,cd,ef->abcdef',
'ab,cd,ef->acdf',
'ab,cd,de->abcde',
'ab,cd,de->be',
'ab,bcd,cd->abcd',
'ab,bcd,cd->abd',
'eb,cb,fb->cef',
'abcd,ad',
'bd,db,eac->ace',
'ba,ac,da->bcd',
'ab,ab',
'ab,ba',
'abc,abc',
'abc,bac',
'abc,cba',
'dba,ead,cad->bce',
'aef,fbc,dca->bde',
]
long_cases = [
'bca,cdb,dbf,afc->',
'efc,dbc,acf,fd->abe',
'ea,fb,gc,hd,abcd->efgh',
'ea,fb,abcd,gc,hd->efgh',
'abhe,hidj,jgba,hiab,gab',
]
invalid_cases = [
# bad formats
'',
'ijk ijk',
'ij.jk->ik',
'ij...,jk...->ik...',
# axis in output that does not exist
'ij,jk->im',
# incorrect number of dimensions
'ij,jkl->kl',
# this is allowed in numpy but not implemented here yet
'iij,jk'
]
dim_mismatch_cases = [('ijk,jkl->il', [(2, 3, 4), (3, 5, 6)])]
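# For reference, run_test below checks each spec against numpy; e.g. 'ij,jk->ik'
# is ordinary matrix multiplication, so np.einsum('ij,jk->ik', a, b) equals a @ b.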
def test_simple(self):
for case in self.simple_cases:
self.run_test(case)
def test_long(self):
for case in self.long_cases:
self.run_test(case)
def test_invalid(self):
for axes in self.invalid_cases:
inputs = [
array_ops.placeholder(
dtypes.float32, shape=(3, 4)),
array_ops.placeholder(
dtypes.float32, shape=(3, 4)),
]
with self.assertRaises(ValueError):
_ = special_math_ops.einsum(axes, *inputs)
def test_dim_mismatch(self):
for axes, input_shapes in self.dim_mismatch_cases:
inputs = [
array_ops.placeholder(
dtypes.float32, shape=shape) for shape in input_shapes
]
with self.assertRaises(ValueError):
_ = special_math_ops.einsum(axes, *inputs)
def run_test(self, axes):
all_axes = {ax: np.random.randint(4, 12) for ax in axes if ax.isalpha()}
input_vals = []
input_axes, _, _ = axes.partition('->')
for idx in input_axes.split(','):
shape = [all_axes[ax] for ax in idx]
input_vals.append(np.random.random(shape))
input_tensors = [constant_op.constant(val) for val in input_vals]
output_tensor = special_math_ops.einsum(axes, *input_tensors)
with self.test_session():
output_value = output_tensor.eval()
correct_value = np.einsum(axes, *input_vals)
err = np.abs(correct_value - output_value).max()
print(axes, err)
assert err < 1e-8
def test_input_is_placeholder(self):
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(1, None))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, 1))
out = special_math_ops.einsum('ij,jk->ik', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[1, 2, 3]],
m1: [[2], [1], [1]],
}
np.testing.assert_almost_equal(
[[7]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, 3))
m1 = array_ops.placeholder(dtypes.int32, shape=(3,))
out = special_math_ops.einsum('ij,j->i', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[1, 2, 3]],
m1: [2, 1, 1],
}
np.testing.assert_almost_equal([7], sess.run(out, feed_dict=feed_dict))
# Tests for placeholders which have two or more None values
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
m1 = array_ops.placeholder(dtypes.int32, shape=(2, 1))
out = special_math_ops.einsum('ijk,kl->ijl', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[[1,2]]],
m1: [[3], [2]],
}
np.testing.assert_almost_equal(
[[[7]]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(2, 1))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
out = special_math_ops.einsum('kl,ijk->ijl', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[3], [2]],
m1: [[[1,2]]],
}
np.testing.assert_almost_equal(
[[[7]]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
m1 = array_ops.placeholder(dtypes.int32, shape=(2,))
out = special_math_ops.einsum('ijk,k->ij', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[[1, 2]]],
m1: [3, 2],
}
np.testing.assert_almost_equal(
[[7]], sess.run(out, feed_dict=feed_dict))
with ops.Graph().as_default():
m0 = array_ops.placeholder(dtypes.int32, shape=(None, 2, None, 2))
m1 = array_ops.placeholder(dtypes.int32, shape=(None, 2))
out = special_math_ops.einsum('ijkl,ij->ikl', m0, m1)
with session.Session() as sess:
feed_dict = {
m0: [[[[1, 2]], [[2, 1]]]],
m1: [[3, 2]],
}
np.testing.assert_almost_equal(
[[[7, 8]]], sess.run(out, feed_dict=feed_dict))
if __name__ == '__main__':
test.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytree nodes with extra formatting information.
This is a thin wrapper around a pytree.Leaf node.
"""
import keyword
import re
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import pytree_utils
from yapf.yapflib import style
CONTINUATION = token.N_TOKENS
token.N_TOKENS += 1
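# CONTINUATION marks line-continuation tokens; claiming the next free token id keeps
# it from colliding with the token types lib2to3 already defines.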
class Subtype(object):
"""Subtype information about tokens.
Gleaned from parsing the code. Helps determine the best formatting.
"""
NONE = 0
UNARY_OPERATOR = 1
BINARY_OPERATOR = 2
SUBSCRIPT_COLON = 3
DEFAULT_OR_NAMED_ASSIGN = 4
VARARGS_STAR = 5
KWARGS_STAR_STAR = 6
ASSIGN_OPERATOR = 7
DICTIONARY_KEY = 8
DICTIONARY_VALUE = 9
DICT_SET_GENERATOR = 10
COMP_FOR = 11
COMP_IF = 12
IF_TEST_EXPR = 13
DEFAULT_OR_NAMED_ASSIGN_ARG_LIST = 14
class FormatToken(object):
"""A wrapper around pytree Leaf nodes.
This represents the token plus additional information useful for reformatting
the code.
Attributes:
next_token: The token in the unwrapped line after this token or None if this
is the last token in the unwrapped line.
previous_token: The token in the unwrapped line before this token or None if
this is the first token in the unwrapped line.
matching_bracket: If a bracket token ('[', '{', or '(') the matching
bracket.
whitespace_prefix: The prefix for the whitespace.
spaces_required_before: The number of spaces required before a token. This
is a lower bound for the formatter, not a hard requirement. For instance,
a comment may have n required spaces before it, but the formatter only
places n spaces before comments that are moved to the end of a line of
code; elsewhere it may use different spacing when appropriate.
can_break_before: True if we're allowed to break before this token.
must_break_before: True if we're required to break before this token.
total_length: The total length of the unwrapped line up to and including
whitespace and this token. However, this doesn't include the initial
indentation amount.
split_penalty: The penalty for splitting the line before this token.
"""
def __init__(self, node):
"""Constructor.
Arguments:
node: (pytree.Leaf) The node that's being wrapped.
"""
assert isinstance(node, pytree.Leaf)
self._node = node
self.next_token = None
self.previous_token = None
self.matching_bracket = None
self.whitespace_prefix = ''
self.can_break_before = False
self.must_break_before = False
self.total_length = 0 # TODO(morbo): Think up a better name.
self.split_penalty = 0
if self.is_comment:
self.spaces_required_before = style.Get('SPACES_BEFORE_COMMENT')
else:
self.spaces_required_before = 0
def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0):
"""Register a token's whitespace prefix.
This is the whitespace that will be output before a token's string.
Arguments:
newlines_before: (int) The number of newlines to place before the token.
spaces: (int) The number of spaces to place before the token.
indent_level: (int) The indentation level.
"""
spaces_before = (
' ' * indent_level * style.Get('INDENT_WIDTH') + ' ' * spaces
)
if self.is_comment:
comment_lines = [s.lstrip() for s in self.value.splitlines()]
self._node.value = ('\n' + spaces_before).join(comment_lines)
if not self.whitespace_prefix:
self.whitespace_prefix = (
'\n' * (self.newlines or newlines_before) + spaces_before
)
else:
self.whitespace_prefix += spaces_before
def AdjustNewlinesBefore(self, newlines_before):
"""Change the number of newlines before this token."""
self.whitespace_prefix = (
'\n' * newlines_before + self.whitespace_prefix.lstrip('\n')
)
def RetainHorizontalSpacing(self, first_column, depth):
"""Retains a token's horizontal spacing."""
previous = self.previous_token
if previous is None:
return
cur_lineno = self.lineno
prev_lineno = previous.lineno
if previous.is_multiline_string:
prev_lineno += previous.value.count('\n')
if cur_lineno != prev_lineno:
self.spaces_required_before = (
self.column - first_column + depth * style.Get('INDENT_WIDTH'))
return
cur_column = self.node.column
prev_column = previous.node.column
prev_len = len(previous.value)
if previous.is_multiline_string:
prev_len = len(previous.value.split('\n')[-1])
self.spaces_required_before = cur_column - (prev_column + prev_len)
def OpensScope(self):
return self.value in pytree_utils.OPENING_BRACKETS
def ClosesScope(self):
return self.value in pytree_utils.CLOSING_BRACKETS
def GetPytreeNode(self):
return self._node
@property
def value(self):
if self.is_continuation:
return self._node.value.rstrip()
return self._node.value
@property
def node(self):
return self._node
@property
def node_split_penalty(self):
"""Split penalty attached to the pytree node of this token.
Returns:
The penalty, or None if no annotation is attached.
"""
return pytree_utils.GetNodeAnnotation(self._node,
pytree_utils.Annotation.SPLIT_PENALTY,
default=0)
@property
def newlines(self):
"""The number of newlines needed before this token."""
return pytree_utils.GetNodeAnnotation(self._node,
pytree_utils.Annotation.NEWLINES)
@property
def column(self):
"""The original column number of the node in the source."""
return self._node.column
@property
def lineno(self):
"""The original line number of the node in the source."""
return self._node.lineno
@property
def subtypes(self):
"""Extra type information for directing formatting."""
value = pytree_utils.GetNodeAnnotation(self._node,
pytree_utils.Annotation.SUBTYPE)
return [Subtype.NONE] if value is None else value
@property
def is_binary_op(self):
"""Token is a binary operator."""
return Subtype.BINARY_OPERATOR in self.subtypes
@property
def name(self):
"""A string representation of the node's name."""
return pytree_utils.NodeName(self._node)
def __repr__(self):
return 'FormatToken(name={0}, value={1})'.format(self.name, self.value)
@property
def is_comment(self):
return self._node.type == token.COMMENT
@property
def is_continuation(self):
return self._node.type == CONTINUATION
@property
def is_keyword(self):
return keyword.iskeyword(self.value)
@property
def is_name(self):
return self._node.type == token.NAME and not self.is_keyword
@property
def is_number(self):
return self._node.type == token.NUMBER
@property
def is_string(self):
return self._node.type == token.STRING
@property
def is_multiline_string(self):
return (self.is_string and
re.match(r'^[uUbB]?[rR]?(?P<delim>"""|\'\'\').*(?P=delim)$',
self.value, re.DOTALL) is not None)
@property
def is_docstring(self):
return self.is_multiline_string and not self.node.prev_sibling
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import mock
from mock import call
from osc_lib.cli import format_columns
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.network.v2 import port
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit.network.v2 import fakes as network_fakes
from openstackclient.tests.unit import utils as tests_utils
class TestPort(network_fakes.TestNetworkV2):
def setUp(self):
super(TestPort, self).setUp()
# Get a shortcut to the network client
self.network = self.app.client_manager.network
# Get a shortcut to the ProjectManager Mock
self.projects_mock = self.app.client_manager.identity.projects
@staticmethod
def _get_common_cols_data(fake_port):
columns = (
'admin_state_up',
'allowed_address_pairs',
'binding_host_id',
'binding_profile',
'binding_vif_details',
'binding_vif_type',
'binding_vnic_type',
'data_plane_status',
'description',
'device_id',
'device_owner',
'dns_assignment',
'dns_domain',
'dns_name',
'extra_dhcp_opts',
'fixed_ips',
'id',
'mac_address',
'name',
'network_id',
'port_security_enabled',
'project_id',
'qos_policy_id',
'security_group_ids',
'status',
'tags',
'uplink_status_propagation',
)
data = (
port.AdminStateColumn(fake_port.admin_state_up),
format_columns.ListDictColumn(fake_port.allowed_address_pairs),
fake_port.binding_host_id,
format_columns.DictColumn(fake_port.binding_profile),
format_columns.DictColumn(fake_port.binding_vif_details),
fake_port.binding_vif_type,
fake_port.binding_vnic_type,
fake_port.data_plane_status,
fake_port.description,
fake_port.device_id,
fake_port.device_owner,
format_columns.ListDictColumn(fake_port.dns_assignment),
fake_port.dns_domain,
fake_port.dns_name,
format_columns.ListDictColumn(fake_port.extra_dhcp_opts),
format_columns.ListDictColumn(fake_port.fixed_ips),
fake_port.id,
fake_port.mac_address,
fake_port.name,
fake_port.network_id,
fake_port.port_security_enabled,
fake_port.project_id,
fake_port.qos_policy_id,
format_columns.ListColumn(fake_port.security_group_ids),
fake_port.status,
format_columns.ListColumn(fake_port.tags),
fake_port.uplink_status_propagation,
)
return columns, data
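# Note: 'columns' and 'data' above are parallel tuples; the tests below
# compare them against the (columns, data) pair returned by each command's
# take_action().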
class TestCreatePort(TestPort):
_port = network_fakes.FakePort.create_one_port()
columns, data = TestPort._get_common_cols_data(_port)
def setUp(self):
super(TestCreatePort, self).setUp()
self.network.create_port = mock.Mock(return_value=self._port)
self.network.set_tags = mock.Mock(return_value=None)
fake_net = network_fakes.FakeNetwork.create_one_network({
'id': self._port.network_id,
})
self.network.find_network = mock.Mock(return_value=fake_net)
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet()
self.network.find_subnet = mock.Mock(return_value=self.fake_subnet)
# Get the command object to test
self.cmd = port.CreatePort(self.app, self.namespace)
def test_create_default_options(self):
arglist = [
'--network', self._port.network_id,
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'name': 'test-port',
})
self.assertFalse(self.network.set_tags.called)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_full_options(self):
arglist = [
'--mac-address', 'aa:aa:aa:aa:aa:aa',
'--fixed-ip', 'subnet=%s,ip-address=10.0.0.2'
% self.fake_subnet.id,
'--description', self._port.description,
'--device', 'deviceid',
'--device-owner', 'fakeowner',
'--disable',
'--vnic-type', 'macvtap',
'--binding-profile', 'foo=bar',
'--binding-profile', 'foo2=bar2',
'--network', self._port.network_id,
'--dns-domain', 'example.org',
'--dns-name', '8.8.8.8',
'test-port',
]
verifylist = [
('mac_address', 'aa:aa:aa:aa:aa:aa'),
(
'fixed_ip',
[{'subnet': self.fake_subnet.id, 'ip-address': '10.0.0.2'}]
),
('description', self._port.description),
('device', 'deviceid'),
('device_owner', 'fakeowner'),
('disable', True),
('vnic_type', 'macvtap'),
('binding_profile', {'foo': 'bar', 'foo2': 'bar2'}),
('network', self._port.network_id),
('dns_domain', 'example.org'),
('dns_name', '8.8.8.8'),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'mac_address': 'aa:aa:aa:aa:aa:aa',
'fixed_ips': [{'subnet_id': self.fake_subnet.id,
'ip_address': '10.0.0.2'}],
'description': self._port.description,
'device_id': 'deviceid',
'device_owner': 'fakeowner',
'admin_state_up': False,
'binding:vnic_type': 'macvtap',
'binding:profile': {'foo': 'bar', 'foo2': 'bar2'},
'network_id': self._port.network_id,
'dns_domain': 'example.org',
'dns_name': '8.8.8.8',
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_invalid_json_binding_profile(self):
arglist = [
'--network', self._port.network_id,
'--binding-profile', '{"parent_name":"fake_parent"',
'test-port',
]
self.assertRaises(argparse.ArgumentTypeError,
self.check_parser,
self.cmd,
arglist,
None)
def test_create_invalid_key_value_binding_profile(self):
arglist = [
'--network', self._port.network_id,
'--binding-profile', 'key',
'test-port',
]
self.assertRaises(argparse.ArgumentTypeError,
self.check_parser,
self.cmd,
arglist,
None)
def test_create_json_binding_profile(self):
arglist = [
'--network', self._port.network_id,
'--binding-profile', '{"parent_name":"fake_parent"}',
'--binding-profile', '{"tag":42}',
'test-port',
]
verifylist = [
('network', self._port.network_id),
('enable', True),
('binding_profile', {'parent_name': 'fake_parent', 'tag': 42}),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'binding:profile': {'parent_name': 'fake_parent', 'tag': 42},
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_with_security_group(self):
secgroup = network_fakes.FakeSecurityGroup.create_one_security_group()
self.network.find_security_group = mock.Mock(return_value=secgroup)
arglist = [
'--network', self._port.network_id,
'--security-group', secgroup.id,
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('security_group', [secgroup.id]),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'security_group_ids': [secgroup.id],
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_port_with_dns_name(self):
arglist = [
'--network', self._port.network_id,
'--dns-name', '8.8.8.8',
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('dns_name', '8.8.8.8'),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'dns_name': '8.8.8.8',
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_with_security_groups(self):
sg_1 = network_fakes.FakeSecurityGroup.create_one_security_group()
sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group()
self.network.find_security_group = mock.Mock(side_effect=[sg_1, sg_2])
arglist = [
'--network', self._port.network_id,
'--security-group', sg_1.id,
'--security-group', sg_2.id,
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('security_group', [sg_1.id, sg_2.id]),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'security_group_ids': [sg_1.id, sg_2.id],
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_with_no_security_groups(self):
arglist = [
'--network', self._port.network_id,
'--no-security-group',
'test-port',
]
verifylist = [
('network', self._port.network_id),
('enable', True),
('no_security_group', True),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'security_group_ids': [],
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_with_no_fixed_ips(self):
arglist = [
'--network', self._port.network_id,
'--no-fixed-ip',
'test-port',
]
verifylist = [
('network', self._port.network_id),
('enable', True),
('no_fixed_ip', True),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'fixed_ips': [],
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_port_with_allowed_address_pair_ipaddr(self):
pairs = [{'ip_address': '192.168.1.123'},
{'ip_address': '192.168.1.45'}]
arglist = [
'--network', self._port.network_id,
'--allowed-address', 'ip-address=192.168.1.123',
'--allowed-address', 'ip-address=192.168.1.45',
'test-port',
]
verifylist = [
('network', self._port.network_id),
('enable', True),
('allowed_address_pairs', [{'ip-address': '192.168.1.123'},
{'ip-address': '192.168.1.45'}]),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'allowed_address_pairs': pairs,
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_port_with_allowed_address_pair(self):
pairs = [{'ip_address': '192.168.1.123',
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'ip_address': '192.168.1.45',
'mac_address': 'aa:aa:aa:aa:aa:b1'}]
arglist = [
'--network', self._port.network_id,
'--allowed-address',
'ip-address=192.168.1.123,mac-address=aa:aa:aa:aa:aa:aa',
'--allowed-address',
'ip-address=192.168.1.45,mac-address=aa:aa:aa:aa:aa:b1',
'test-port',
]
verifylist = [
('network', self._port.network_id),
('enable', True),
('allowed_address_pairs', [{'ip-address': '192.168.1.123',
'mac-address': 'aa:aa:aa:aa:aa:aa'},
{'ip-address': '192.168.1.45',
'mac-address': 'aa:aa:aa:aa:aa:b1'}]),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'allowed_address_pairs': pairs,
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_port_with_qos(self):
qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
arglist = [
'--network', self._port.network_id,
'--qos-policy', qos_policy.id,
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('qos_policy', qos_policy.id),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'qos_policy_id': qos_policy.id,
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_port_security_enabled(self):
arglist = [
'--network', self._port.network_id,
'--enable-port-security',
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('enable_port_security', True),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'port_security_enabled': True,
'name': 'test-port',
})
def test_create_port_security_disabled(self):
arglist = [
'--network', self._port.network_id,
'--disable-port-security',
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('enable', True),
('disable_port_security', True),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'port_security_enabled': False,
'name': 'test-port',
})
def _test_create_with_tag(self, add_tags=True):
arglist = [
'--network', self._port.network_id,
'test-port',
]
if add_tags:
arglist += ['--tag', 'red', '--tag', 'blue']
else:
arglist += ['--no-tag']
verifylist = [
('network', self._port.network_id,),
('enable', True),
('name', 'test-port'),
]
if add_tags:
verifylist.append(('tags', ['red', 'blue']))
else:
verifylist.append(('no_tag', True))
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(
admin_state_up=True,
network_id=self._port.network_id,
name='test-port'
)
if add_tags:
self.network.set_tags.assert_called_once_with(
self._port,
tests_utils.CompareBySet(['red', 'blue']))
else:
self.assertFalse(self.network.set_tags.called)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_with_tags(self):
self._test_create_with_tag(add_tags=True)
def test_create_with_no_tag(self):
self._test_create_with_tag(add_tags=False)
def _test_create_with_uplink_status_propagation(self, enable=True):
arglist = [
'--network', self._port.network_id,
'test-port',
]
if enable:
arglist += ['--enable-uplink-status-propagation']
else:
arglist += ['--disable-uplink-status-propagation']
verifylist = [
('network', self._port.network_id,),
('name', 'test-port'),
]
if enable:
verifylist.append(('enable_uplink_status_propagation', True))
else:
verifylist.append(('disable_uplink_status_propagation', True))
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'propagate_uplink_status': enable,
'name': 'test-port',
})
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
def test_create_with_uplink_status_propagation_enabled(self):
self._test_create_with_uplink_status_propagation(enable=True)
def test_create_with_uplink_status_propagation_disabled(self):
self._test_create_with_uplink_status_propagation(enable=False)
def test_create_port_with_extra_dhcp_option(self):
extra_dhcp_options = [{'opt_name': 'classless-static-route',
'opt_value': '169.254.169.254/32,22.2.0.2,'
'0.0.0.0/0,22.2.0.1',
'ip_version': '4'},
{'opt_name': 'dns-server',
'opt_value': '240C::6666',
'ip_version': '6'}]
arglist = [
'--network', self._port.network_id,
'--extra-dhcp-option', 'name=classless-static-route,'
'value=169.254.169.254/32,22.2.0.2,'
'0.0.0.0/0,22.2.0.1,'
'ip-version=4',
'--extra-dhcp-option', 'name=dns-server,value=240C::6666,'
'ip-version=6',
'test-port',
]
verifylist = [
('network', self._port.network_id,),
('extra_dhcp_options', [{'name': 'classless-static-route',
'value': '169.254.169.254/32,22.2.0.2,'
'0.0.0.0/0,22.2.0.1',
'ip-version': '4'},
{'name': 'dns-server',
'value': '240C::6666',
'ip-version': '6'}]),
('name', 'test-port'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.network.create_port.assert_called_once_with(**{
'admin_state_up': True,
'network_id': self._port.network_id,
'extra_dhcp_opts': extra_dhcp_options,
'name': 'test-port',
})
class TestDeletePort(TestPort):
# Ports to delete.
_ports = network_fakes.FakePort.create_ports(count=2)
def setUp(self):
super(TestDeletePort, self).setUp()
self.network.delete_port = mock.Mock(return_value=None)
self.network.find_port = network_fakes.FakePort.get_ports(
ports=self._ports)
# Get the command object to test
self.cmd = port.DeletePort(self.app, self.namespace)
def test_port_delete(self):
arglist = [
self._ports[0].name,
]
verifylist = [
('port', [self._ports[0].name]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.network.find_port.assert_called_once_with(
self._ports[0].name, ignore_missing=False)
self.network.delete_port.assert_called_once_with(self._ports[0])
self.assertIsNone(result)
def test_multi_ports_delete(self):
arglist = []
verifylist = []
for p in self._ports:
arglist.append(p.name)
verifylist = [
('port', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
calls = []
for p in self._ports:
calls.append(call(p))
self.network.delete_port.assert_has_calls(calls)
self.assertIsNone(result)
def test_multi_ports_delete_with_exception(self):
arglist = [
self._ports[0].name,
'unexist_port',
]
verifylist = [
('port',
[self._ports[0].name, 'unexist_port']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
find_mock_result = [self._ports[0], exceptions.CommandError]
self.network.find_port = (
mock.Mock(side_effect=find_mock_result)
)
try:
self.cmd.take_action(parsed_args)
self.fail('CommandError should be raised.')
except exceptions.CommandError as e:
self.assertEqual('1 of 2 ports failed to delete.', str(e))
self.network.find_port.assert_any_call(
self._ports[0].name, ignore_missing=False)
self.network.find_port.assert_any_call(
'unexist_port', ignore_missing=False)
self.network.delete_port.assert_called_once_with(
self._ports[0]
)
class TestListPort(TestPort):
_ports = network_fakes.FakePort.create_ports(count=3)
columns = (
'ID',
'Name',
'MAC Address',
'Fixed IP Addresses',
'Status',
)
columns_long = (
'ID',
'Name',
'MAC Address',
'Fixed IP Addresses',
'Status',
'Security Groups',
'Device Owner',
'Tags',
)
data = []
for prt in _ports:
data.append((
prt.id,
prt.name,
prt.mac_address,
format_columns.ListDictColumn(prt.fixed_ips),
prt.status,
))
data_long = []
for prt in _ports:
data_long.append((
prt.id,
prt.name,
prt.mac_address,
format_columns.ListDictColumn(prt.fixed_ips),
prt.status,
format_columns.ListColumn(prt.security_group_ids),
prt.device_owner,
format_columns.ListColumn(prt.tags),
))
def setUp(self):
super(TestListPort, self).setUp()
# Get the command object to test
self.cmd = port.ListPort(self.app, self.namespace)
self.network.ports = mock.Mock(return_value=self._ports)
fake_router = network_fakes.FakeRouter.create_one_router({
'id': 'fake-router-id',
})
fake_network = network_fakes.FakeNetwork.create_one_network({
'id': 'fake-network-id',
})
self.network.find_router = mock.Mock(return_value=fake_router)
self.network.find_network = mock.Mock(return_value=fake_network)
self.app.client_manager.compute = mock.Mock()
def test_port_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with()
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_router_opt(self):
arglist = [
'--router', 'fake-router-name',
]
verifylist = [
('router', 'fake-router-name')
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'device_id': 'fake-router-id'
})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
@mock.patch.object(utils, 'find_resource')
def test_port_list_with_server_option(self, mock_find):
fake_server = compute_fakes.FakeServer.create_one_server()
mock_find.return_value = fake_server
arglist = [
'--server', 'fake-server-name',
]
verifylist = [
('server', 'fake-server-name'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(
device_id=fake_server.id)
mock_find.assert_called_once_with(mock.ANY, 'fake-server-name')
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_device_id_opt(self):
arglist = [
'--device-id', self._ports[0].device_id,
]
verifylist = [
('device_id', self._ports[0].device_id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'device_id': self._ports[0].device_id
})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_device_owner_opt(self):
arglist = [
'--device-owner', self._ports[0].device_owner,
]
verifylist = [
('device_owner', self._ports[0].device_owner)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'device_owner': self._ports[0].device_owner
})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_all_opt(self):
arglist = [
'--device-owner', self._ports[0].device_owner,
'--router', 'fake-router-name',
'--network', 'fake-network-name',
'--mac-address', self._ports[0].mac_address,
]
verifylist = [
('device_owner', self._ports[0].device_owner),
('router', 'fake-router-name'),
('network', 'fake-network-name'),
('mac_address', self._ports[0].mac_address)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'device_owner': self._ports[0].device_owner,
'device_id': 'fake-router-id',
'network_id': 'fake-network-id',
'mac_address': self._ports[0].mac_address
})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_mac_address_opt(self):
arglist = [
'--mac-address', self._ports[0].mac_address,
]
verifylist = [
('mac_address', self._ports[0].mac_address)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'mac_address': self._ports[0].mac_address
})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_fixed_ip_opt_ip_address(self):
ip_address = self._ports[0].fixed_ips[0]['ip_address']
arglist = [
'--fixed-ip', "ip-address=%s" % ip_address,
]
verifylist = [
('fixed_ip', [{'ip-address': ip_address}])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'fixed_ips': ['ip_address=%s' % ip_address]})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_fixed_ip_opt_ip_address_substr(self):
ip_address_ss = self._ports[0].fixed_ips[0]['ip_address'][:-1]
arglist = [
'--fixed-ip', "ip-substring=%s" % ip_address_ss,
]
verifylist = [
('fixed_ip', [{'ip-substring': ip_address_ss}])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'fixed_ips': ['ip_address_substr=%s' % ip_address_ss]})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_fixed_ip_opt_subnet_id(self):
subnet_id = self._ports[0].fixed_ips[0]['subnet_id']
arglist = [
'--fixed-ip', "subnet=%s" % subnet_id,
]
verifylist = [
('fixed_ip', [{'subnet': subnet_id}])
]
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet(
{'id': subnet_id})
self.network.find_subnet = mock.Mock(return_value=self.fake_subnet)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'fixed_ips': ['subnet_id=%s' % subnet_id]})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_fixed_ip_opts(self):
subnet_id = self._ports[0].fixed_ips[0]['subnet_id']
ip_address = self._ports[0].fixed_ips[0]['ip_address']
arglist = [
'--fixed-ip', "subnet=%s,ip-address=%s" % (subnet_id,
ip_address)
]
verifylist = [
('fixed_ip', [{'subnet': subnet_id,
'ip-address': ip_address}])
]
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet(
{'id': subnet_id})
self.network.find_subnet = mock.Mock(return_value=self.fake_subnet)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'fixed_ips': ['subnet_id=%s' % subnet_id,
'ip_address=%s' % ip_address]})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_fixed_ips(self):
subnet_id = self._ports[0].fixed_ips[0]['subnet_id']
ip_address = self._ports[0].fixed_ips[0]['ip_address']
arglist = [
'--fixed-ip', "subnet=%s" % subnet_id,
'--fixed-ip', "ip-address=%s" % ip_address,
]
verifylist = [
('fixed_ip', [{'subnet': subnet_id},
{'ip-address': ip_address}])
]
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet(
{'id': subnet_id})
self.network.find_subnet = mock.Mock(return_value=self.fake_subnet)
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(**{
'fixed_ips': ['subnet_id=%s' % subnet_id,
'ip_address=%s' % ip_address]})
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_list_port_with_long(self):
arglist = [
'--long',
]
verifylist = [
('long', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with()
self.assertEqual(self.columns_long, columns)
self.assertListItemEqual(self.data_long, list(data))
def test_port_list_project(self):
project = identity_fakes.FakeProject.create_one_project()
self.projects_mock.get.return_value = project
arglist = [
'--project', project.id,
]
verifylist = [
('project', project.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
filters = {'tenant_id': project.id, 'project_id': project.id}
self.network.ports.assert_called_once_with(**filters)
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_port_list_project_domain(self):
project = identity_fakes.FakeProject.create_one_project()
self.projects_mock.get.return_value = project
arglist = [
'--project', project.id,
'--project-domain', project.domain_id,
]
verifylist = [
('project', project.id),
('project_domain', project.domain_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
filters = {'tenant_id': project.id, 'project_id': project.id}
self.network.ports.assert_called_once_with(**filters)
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
def test_list_with_tag_options(self):
arglist = [
'--tags', 'red,blue',
'--any-tags', 'red,green',
'--not-tags', 'orange,yellow',
'--not-any-tags', 'black,white',
]
verifylist = [
('tags', ['red', 'blue']),
('any_tags', ['red', 'green']),
('not_tags', ['orange', 'yellow']),
('not_any_tags', ['black', 'white']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ports.assert_called_once_with(
**{'tags': 'red,blue',
'any_tags': 'red,green',
'not_tags': 'orange,yellow',
'not_any_tags': 'black,white'}
)
self.assertEqual(self.columns, columns)
self.assertListItemEqual(self.data, list(data))
class TestSetPort(TestPort):
_port = network_fakes.FakePort.create_one_port({'tags': ['green', 'red']})
def setUp(self):
super(TestSetPort, self).setUp()
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet()
self.network.find_subnet = mock.Mock(return_value=self.fake_subnet)
self.network.find_port = mock.Mock(return_value=self._port)
self.network.update_port = mock.Mock(return_value=None)
self.network.set_tags = mock.Mock(return_value=None)
# Get the command object to test
self.cmd = port.SetPort(self.app, self.namespace)
def test_set_port_defaults(self):
arglist = [
self._port.name,
]
verifylist = [
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertFalse(self.network.update_port.called)
self.assertFalse(self.network.set_tags.called)
self.assertIsNone(result)
def test_set_port_fixed_ip(self):
_testport = network_fakes.FakePort.create_one_port(
{'fixed_ips': [{'ip_address': '0.0.0.1'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--fixed-ip', 'ip-address=10.0.0.12',
_testport.name,
]
verifylist = [
('fixed_ip', [{'ip-address': '10.0.0.12'}]),
('port', _testport.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'fixed_ips': [
{'ip_address': '0.0.0.1'},
{'ip_address': '10.0.0.12'},
],
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_fixed_ip_clear(self):
_testport = network_fakes.FakePort.create_one_port(
{'fixed_ips': [{'ip_address': '0.0.0.1'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--fixed-ip', 'ip-address=10.0.0.12',
'--no-fixed-ip',
_testport.name,
]
verifylist = [
('fixed_ip', [{'ip-address': '10.0.0.12'}]),
('no_fixed_ip', True)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'fixed_ips': [
{'ip_address': '10.0.0.12'},
],
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_dns_name(self):
arglist = [
'--dns-name', '8.8.8.8',
self._port.name,
]
verifylist = [
('dns_name', '8.8.8.8'),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'dns_name': '8.8.8.8',
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_overwrite_binding_profile(self):
_testport = network_fakes.FakePort.create_one_port(
{'binding_profile': {'lok_i': 'visi_on'}})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--binding-profile', 'lok_i=than_os',
'--no-binding-profile',
_testport.name,
]
verifylist = [
('binding_profile', {'lok_i': 'than_os'}),
('no_binding_profile', True)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'binding:profile':
{'lok_i': 'than_os'},
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_overwrite_mac_address(self):
_testport = network_fakes.FakePort.create_one_port(
{'mac_address': '11:22:33:44:55:66'})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--mac-address', '66:55:44:33:22:11',
_testport.name,
]
verifylist = [
('mac_address', '66:55:44:33:22:11'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'mac_address': '66:55:44:33:22:11',
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_this(self):
arglist = [
'--disable',
'--no-fixed-ip',
'--no-binding-profile',
self._port.name,
]
verifylist = [
('disable', True),
('no_binding_profile', True),
('no_fixed_ip', True),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'admin_state_up': False,
'binding:profile': {},
'fixed_ips': [],
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_that(self):
arglist = [
'--description', 'newDescription',
'--enable',
'--vnic-type', 'macvtap',
'--binding-profile', 'foo=bar',
'--host', 'binding-host-id-xxxx',
'--name', 'newName',
self._port.name,
]
verifylist = [
('description', 'newDescription'),
('enable', True),
('vnic_type', 'macvtap'),
('binding_profile', {'foo': 'bar'}),
('host', 'binding-host-id-xxxx'),
('name', 'newName'),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'admin_state_up': True,
'binding:vnic_type': 'macvtap',
'binding:profile': {'foo': 'bar'},
'binding:host_id': 'binding-host-id-xxxx',
'description': 'newDescription',
'name': 'newName',
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_invalid_json_binding_profile(self):
arglist = [
'--binding-profile', '{"parent_name"}',
'test-port',
]
self.assertRaises(argparse.ArgumentTypeError,
self.check_parser,
self.cmd,
arglist,
None)
def test_set_port_invalid_key_value_binding_profile(self):
arglist = [
'--binding-profile', 'key',
'test-port',
]
self.assertRaises(argparse.ArgumentTypeError,
self.check_parser,
self.cmd,
arglist,
None)
def test_set_port_mixed_binding_profile(self):
arglist = [
'--binding-profile', 'foo=bar',
'--binding-profile', '{"foo2": "bar2"}',
self._port.name,
]
verifylist = [
('binding_profile', {'foo': 'bar', 'foo2': 'bar2'}),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'binding:profile': {'foo': 'bar', 'foo2': 'bar2'},
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_security_group(self):
sg = network_fakes.FakeSecurityGroup.create_one_security_group()
self.network.find_security_group = mock.Mock(return_value=sg)
arglist = [
'--security-group', sg.id,
self._port.name,
]
verifylist = [
('security_group', [sg.id]),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'security_group_ids': [sg.id],
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_security_group_append(self):
sg_1 = network_fakes.FakeSecurityGroup.create_one_security_group()
sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group()
sg_3 = network_fakes.FakeSecurityGroup.create_one_security_group()
self.network.find_security_group = mock.Mock(side_effect=[sg_2, sg_3])
_testport = network_fakes.FakePort.create_one_port(
{'security_group_ids': [sg_1.id]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--security-group', sg_2.id,
'--security-group', sg_3.id,
_testport.name,
]
verifylist = [
('security_group', [sg_2.id, sg_3.id]),
('port', _testport.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'security_group_ids': [sg_1.id, sg_2.id, sg_3.id],
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_security_group_clear(self):
arglist = [
'--no-security-group',
self._port.name,
]
verifylist = [
('no_security_group', True),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'security_group_ids': [],
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_security_group_replace(self):
sg1 = network_fakes.FakeSecurityGroup.create_one_security_group()
sg2 = network_fakes.FakeSecurityGroup.create_one_security_group()
_testport = network_fakes.FakePort.create_one_port(
{'security_group_ids': [sg1.id]})
self.network.find_port = mock.Mock(return_value=_testport)
self.network.find_security_group = mock.Mock(return_value=sg2)
arglist = [
'--security-group', sg2.id,
'--no-security-group',
_testport.name,
]
verifylist = [
('security_group', [sg2.id]),
('no_security_group', True)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'security_group_ids': [sg2.id],
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_allowed_address_pair(self):
arglist = [
'--allowed-address', 'ip-address=192.168.1.123',
self._port.name,
]
verifylist = [
('allowed_address_pairs', [{'ip-address': '192.168.1.123'}]),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'allowed_address_pairs': [{'ip_address': '192.168.1.123'}],
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_append_allowed_address_pair(self):
_testport = network_fakes.FakePort.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--allowed-address', 'ip-address=192.168.1.45',
_testport.name,
]
verifylist = [
('allowed_address_pairs', [{'ip-address': '192.168.1.45'}]),
('port', _testport.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'allowed_address_pairs': [{'ip_address': '192.168.1.123'},
{'ip_address': '192.168.1.45'}],
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_overwrite_allowed_address_pair(self):
_testport = network_fakes.FakePort.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--allowed-address', 'ip-address=192.168.1.45',
'--no-allowed-address',
_testport.name,
]
verifylist = [
('allowed_address_pairs', [{'ip-address': '192.168.1.45'}]),
('no_allowed_address_pair', True),
('port', _testport.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'allowed_address_pairs': [{'ip_address': '192.168.1.45'}],
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_no_allowed_address_pairs(self):
arglist = [
'--no-allowed-address',
self._port.name,
]
verifylist = [
('no_allowed_address_pair', True),
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'allowed_address_pairs': [],
}
self.network.update_port.assert_called_once_with(self._port, **attrs)
self.assertIsNone(result)
def test_set_port_security_enabled(self):
arglist = [
'--enable-port-security',
self._port.id,
]
verifylist = [
('enable_port_security', True),
('port', self._port.id,)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.network.update_port.assert_called_once_with(self._port, **{
'port_security_enabled': True,
})
def test_set_port_security_disabled(self):
arglist = [
'--disable-port-security',
self._port.id,
]
verifylist = [
('disable_port_security', True),
('port', self._port.id,)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.network.update_port.assert_called_once_with(self._port, **{
'port_security_enabled': False,
})
def test_set_port_with_qos(self):
qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
_testport = network_fakes.FakePort.create_one_port(
{'qos_policy_id': None})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--qos-policy', qos_policy.id,
_testport.name,
]
verifylist = [
('qos_policy', qos_policy.id),
('port', _testport.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'qos_policy_id': qos_policy.id,
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_data_plane_status(self):
_testport = network_fakes.FakePort.create_one_port(
{'data_plane_status': None})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
'--data-plane-status', 'ACTIVE',
_testport.name,
]
verifylist = [
('data_plane_status', 'ACTIVE'),
('port', _testport.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'data_plane_status': 'ACTIVE',
}
self.network.update_port.assert_called_once_with(_testport, **attrs)
self.assertIsNone(result)
def test_set_port_invalid_data_plane_status_value(self):
arglist = [
'--data-plane-status', 'Spider-Man',
'test-port',
]
self.assertRaises(tests_utils.ParserException,
self.check_parser,
self.cmd,
arglist,
None)
def _test_set_tags(self, with_tags=True):
if with_tags:
arglist = ['--tag', 'red', '--tag', 'blue']
verifylist = [('tags', ['red', 'blue'])]
expected_args = ['red', 'blue', 'green']
else:
arglist = ['--no-tag']
verifylist = [('no_tag', True)]
expected_args = []
arglist.append(self._port.name)
verifylist.append(
('port', self._port.name))
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertFalse(self.network.update_port.called)
self.network.set_tags.assert_called_once_with(
self._port,
tests_utils.CompareBySet(expected_args))
self.assertIsNone(result)
def test_set_with_tags(self):
self._test_set_tags(with_tags=True)
def test_set_with_no_tag(self):
self._test_set_tags(with_tags=False)
class TestShowPort(TestPort):
# The port to show.
_port = network_fakes.FakePort.create_one_port()
columns, data = TestPort._get_common_cols_data(_port)
def setUp(self):
super(TestShowPort, self).setUp()
self.network.find_port = mock.Mock(return_value=self._port)
# Get the command object to test
self.cmd = port.ShowPort(self.app, self.namespace)
def test_show_no_options(self):
arglist = []
verifylist = []
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, verifylist)
def test_show_all_options(self):
arglist = [
self._port.name,
]
verifylist = [
('port', self._port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.find_port.assert_called_once_with(
self._port.name, ignore_missing=False)
self.assertEqual(self.columns, columns)
self.assertItemEqual(self.data, data)
class TestUnsetPort(TestPort):
def setUp(self):
super(TestUnsetPort, self).setUp()
self._testport = network_fakes.FakePort.create_one_port(
{'fixed_ips': [{'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152',
'ip_address': '0.0.0.1'},
{'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152',
'ip_address': '1.0.0.0'}],
'binding:profile': {'batman': 'Joker', 'Superman': 'LexLuthor'},
'tags': ['green', 'red'], })
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet(
{'id': '042eb10a-3a18-4658-ab-cf47c8d03152'})
self.network.find_subnet = mock.Mock(return_value=self.fake_subnet)
self.network.find_port = mock.Mock(return_value=self._testport)
self.network.update_port = mock.Mock(return_value=None)
self.network.set_tags = mock.Mock(return_value=None)
# Get the command object to test
self.cmd = port.UnsetPort(self.app, self.namespace)
def test_unset_port_parameters(self):
arglist = [
'--fixed-ip',
'subnet=042eb10a-3a18-4658-ab-cf47c8d03152,ip-address=1.0.0.0',
'--binding-profile', 'Superman',
'--qos-policy',
self._testport.name,
]
verifylist = [
('fixed_ip', [{
'subnet': '042eb10a-3a18-4658-ab-cf47c8d03152',
'ip-address': '1.0.0.0'}]),
('binding_profile', ['Superman']),
('qos_policy', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'fixed_ips': [{
'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152',
'ip_address': '0.0.0.1'}],
'binding:profile': {'batman': 'Joker'},
'qos_policy_id': None
}
self.network.update_port.assert_called_once_with(
self._testport, **attrs)
self.assertIsNone(result)
def test_unset_port_fixed_ip_not_existent(self):
arglist = [
'--fixed-ip', 'ip-address=1.0.0.1',
'--binding-profile', 'Superman',
self._testport.name,
]
verifylist = [
('fixed_ip', [{'ip-address': '1.0.0.1'}]),
('binding_profile', ['Superman']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError,
self.cmd.take_action,
parsed_args)
def test_unset_port_binding_profile_not_existent(self):
arglist = [
'--fixed-ip', 'ip-address=1.0.0.0',
'--binding-profile', 'Neo',
self._testport.name,
]
verifylist = [
('fixed_ip', [{'ip-address': '1.0.0.0'}]),
('binding_profile', ['Neo']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError,
self.cmd.take_action,
parsed_args)
def test_unset_security_group(self):
_fake_sg1 = network_fakes.FakeSecurityGroup.create_one_security_group()
_fake_sg2 = network_fakes.FakeSecurityGroup.create_one_security_group()
_fake_port = network_fakes.FakePort.create_one_port(
{'security_group_ids': [_fake_sg1.id, _fake_sg2.id]})
self.network.find_port = mock.Mock(return_value=_fake_port)
self.network.find_security_group = mock.Mock(return_value=_fake_sg2)
arglist = [
'--security-group', _fake_sg2.id,
_fake_port.name,
]
verifylist = [
('security_group_ids', [_fake_sg2.id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'security_group_ids': [_fake_sg1.id]
}
self.network.update_port.assert_called_once_with(
_fake_port, **attrs)
self.assertIsNone(result)
def test_unset_port_security_group_not_existent(self):
_fake_sg1 = network_fakes.FakeSecurityGroup.create_one_security_group()
_fake_sg2 = network_fakes.FakeSecurityGroup.create_one_security_group()
_fake_port = network_fakes.FakePort.create_one_port(
{'security_group_ids': [_fake_sg1.id]})
self.network.find_security_group = mock.Mock(return_value=_fake_sg2)
arglist = [
'--security-group', _fake_sg2.id,
_fake_port.name,
]
verifylist = [
('security_group_ids', [_fake_sg2.id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError,
self.cmd.take_action,
parsed_args)
def test_unset_port_allowed_address_pair(self):
_fake_port = network_fakes.FakePort.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
'--allowed-address', 'ip-address=192.168.1.123',
_fake_port.name,
]
verifylist = [
('allowed_address_pairs', [{'ip-address': '192.168.1.123'}]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'allowed_address_pairs': [],
}
self.network.update_port.assert_called_once_with(_fake_port, **attrs)
self.assertIsNone(result)
def test_unset_port_allowed_address_pair_not_existent(self):
_fake_port = network_fakes.FakePort.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
'--allowed-address', 'ip-address=192.168.1.45',
_fake_port.name,
]
verifylist = [
('allowed_address_pairs', [{'ip-address': '192.168.1.45'}]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError,
self.cmd.take_action,
parsed_args)
def test_unset_port_data_plane_status(self):
_fake_port = network_fakes.FakePort.create_one_port(
{'data_plane_status': 'ACTIVE'})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
'--data-plane-status',
_fake_port.name,
]
verifylist = [
('data_plane_status', True),
('port', _fake_port.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {
'data_plane_status': None,
}
self.network.update_port.assert_called_once_with(_fake_port, **attrs)
self.assertIsNone(result)
def _test_unset_tags(self, with_tags=True):
if with_tags:
arglist = ['--tag', 'red', '--tag', 'blue']
verifylist = [('tags', ['red', 'blue'])]
expected_args = ['green']
else:
arglist = ['--all-tag']
verifylist = [('all_tag', True)]
expected_args = []
arglist.append(self._testport.name)
verifylist.append(
('port', self._testport.name))
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.assertFalse(self.network.update_port.called)
self.network.set_tags.assert_called_once_with(
self._testport,
tests_utils.CompareBySet(expected_args))
self.assertIsNone(result)
def test_unset_with_tags(self):
self._test_unset_tags(with_tags=True)
def test_unset_with_all_tag(self):
self._test_unset_tags(with_tags=False)
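# The port tests above all follow the same pattern: build an argv-style
# 'arglist', list the expected parsed attributes in 'verifylist', run
# check_parser() to obtain parsed_args, invoke the command's take_action(),
# and then assert on the mocked network client calls and, where applicable,
# on the returned (columns, data).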
|
|
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = ('api.jfisher (Jeff Fisher), '
'api.eric@google.com (Eric Bidelman)')
import os
import re
import getpass
import StringIO
import unittest
import gdata.docs.service
import gdata.spreadsheet.service
username = ''
password = ''
class DocumentListQueryTest(unittest.TestCase):
def setUp(self):
self.doclist = client
self.feed = self.doclist.GetDocumentListFeed()
def testGetDocumentsListFeed(self):
self.assert_(isinstance(self.feed, gdata.docs.DocumentListFeed))
uri = 'http://docs.google.com/feeds/documents/private/full/?max-results=1'
# Query using GetDocumentListFeed()
feed = self.doclist.GetDocumentListFeed(uri)
self.assert_(isinstance(feed, gdata.docs.DocumentListFeed))
self.assertEqual(len(feed.entry), 1)
self.assertEqual(self.feed.entry[0].id.text, feed.entry[0].id.text)
self.assertEqual(self.feed.entry[0].title.text, feed.entry[0].title.text)
# Query using QueryDocumentListFeed()
feed2 = self.doclist.QueryDocumentListFeed(uri)
self.assertEqual(len(feed2.entry), 1)
self.assertEqual(self.feed.entry[0].id.text, feed2.entry[0].id.text)
self.assertEqual(self.feed.entry[0].title.text, feed2.entry[0].title.text)
def testGetDocumentsListEntry(self):
self_link = self.feed.entry[0].GetSelfLink().href
entry = self.doclist.GetDocumentListEntry(self_link)
self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
self.assertEqual(self.feed.entry[0].id.text, entry.id.text)
self.assertEqual(self.feed.entry[0].title.text, entry.title.text)
self.assert_(self.feed.entry[0].resourceId.text is not None)
self.assert_(self.feed.entry[0].lastModifiedBy is not None)
self.assert_(self.feed.entry[0].lastViewed is not None)
def testGetDocumentsListAclFeed(self):
uri = ('http://docs.google.com/feeds/documents/private/full/'
'-/mine?max-results=1')
feed = self.doclist.GetDocumentListFeed(uri)
feed_link = feed.entry[0].GetAclLink().href
acl_feed = self.doclist.GetDocumentListAclFeed(feed_link)
self.assert_(isinstance(acl_feed, gdata.docs.DocumentListAclFeed))
self.assert_(isinstance(acl_feed.entry[0], gdata.docs.DocumentListAclEntry))
self.assert_(acl_feed.entry[0].scope is not None)
self.assert_(acl_feed.entry[0].role is not None)
class DocumentListAclTest(unittest.TestCase):
def setUp(self):
self.doclist = client
uri = ('http://docs.google.com/feeds/documents/private/full'
'/-/mine?max-results=1')
self.feed = self.doclist.GetDocumentListFeed(uri)
self.EMAIL = 'x@example.com'
self.SCOPE_TYPE = 'user'
self.ROLE_VALUE = 'reader'
def testCreateAndUpdateAndDeleteAcl(self):
# Add new ACL
scope = gdata.docs.Scope(value=self.EMAIL, type=self.SCOPE_TYPE)
role = gdata.docs.Role(value=self.ROLE_VALUE)
acl_entry = self.doclist.Post(
gdata.docs.DocumentListAclEntry(scope=scope, role=role),
self.feed.entry[0].GetAclLink().href,
converter=gdata.docs.DocumentListAclEntryFromString)
self.assert_(isinstance(acl_entry, gdata.docs.DocumentListAclEntry))
self.assertEqual(acl_entry.scope.value, self.EMAIL)
self.assertEqual(acl_entry.scope.type, self.SCOPE_TYPE)
self.assertEqual(acl_entry.role.value, self.ROLE_VALUE)
# Update the user's role
ROLE_VALUE = 'writer'
acl_entry.role.value = ROLE_VALUE
updated_acl_entry = self.doclist.Put(
acl_entry, acl_entry.GetEditLink().href,
converter=gdata.docs.DocumentListAclEntryFromString)
self.assertEqual(updated_acl_entry.scope.value, self.EMAIL)
self.assertEqual(updated_acl_entry.scope.type, self.SCOPE_TYPE)
self.assertEqual(updated_acl_entry.role.value, ROLE_VALUE)
# Delete the ACL
self.doclist.Delete(updated_acl_entry.GetEditLink().href)
# Make sure entry was actually deleted
acl_feed = self.doclist.GetDocumentListAclFeed(
self.feed.entry[0].GetAclLink().href)
for acl_entry in acl_feed.entry:
self.assert_(acl_entry.scope.value != self.EMAIL)
class DocumentListCreateAndDeleteTest(unittest.TestCase):
def setUp(self):
self.doclist = client
self.TITLE = 'Test title'
self.new_entry = gdata.docs.DocumentListEntry()
category = gdata.atom.Category(scheme=gdata.docs.service.DATA_KIND_SCHEME,
term=gdata.docs.service.DOCUMENT_KIND_TERM,
label='document')
self.new_entry.category.append(category)
def testCreateAndDeleteEmptyDocumentSlugHeaderTitle(self):
created_entry = self.doclist.Post(self.new_entry,
'/feeds/documents/private/full',
extra_headers={'Slug': self.TITLE})
self.doclist.Delete(created_entry.GetEditLink().href)
self.assertEqual(created_entry.title.text, self.TITLE)
self.assertEqual(created_entry.category[0].label, 'document')
def testCreateAndDeleteEmptyDocumentAtomTitle(self):
self.new_entry.title = gdata.atom.Title(text=self.TITLE)
created_entry = self.doclist.Post(self.new_entry,
'/feeds/documents/private/full')
self.doclist.Delete(created_entry.GetEditLink().href)
self.assertEqual(created_entry.title.text, self.TITLE)
self.assertEqual(created_entry.category[0].label, 'document')
def testCreateAndDeleteEmptySpreadsheet(self):
self.new_entry.title = gdata.atom.Title(text=self.TITLE)
self.new_entry.category[0].term = gdata.docs.service.SPREADSHEET_KIND_TERM
self.new_entry.category[0].label = 'spreadsheet'
created_entry = self.doclist.Post(self.new_entry,
'/feeds/documents/private/full')
self.doclist.Delete(created_entry.GetEditLink().href)
self.assertEqual(created_entry.title.text, self.TITLE)
self.assertEqual(created_entry.category[0].label, 'spreadsheet')
def testCreateAndDeleteEmptyPresentation(self):
self.new_entry.title = gdata.atom.Title(text=self.TITLE)
self.new_entry.category[0].term = gdata.docs.service.PRESENTATION_KIND_TERM
self.new_entry.category[0].label = 'presentation'
created_entry = self.doclist.Post(self.new_entry,
'/feeds/documents/private/full')
self.doclist.Delete(created_entry.GetEditLink().href)
self.assertEqual(created_entry.title.text, self.TITLE)
self.assertEqual(created_entry.category[0].label, 'presentation')
def testCreateAndDeleteFolder(self):
folder_name = 'TestFolder'
folder = self.doclist.CreateFolder(folder_name)
self.assertEqual(folder.title.text, folder_name)
self.doclist.Delete(folder.GetEditLink().href)
def testCreateAndDeleteFolderInFolder(self):
DEST_FOLDER_NAME = 'TestFolder'
CREATED_FOLDER_NAME = 'TestFolder2'
dest_folder = self.doclist.CreateFolder(DEST_FOLDER_NAME)
new_folder = self.doclist.CreateFolder(CREATED_FOLDER_NAME, dest_folder)
for category in new_folder.category:
if category.scheme.startswith(gdata.docs.service.FOLDERS_SCHEME_PREFIX):
self.assertEqual(new_folder.category[0].label, DEST_FOLDER_NAME)
break
# delete the folders we created
dest_folder = self.doclist.Get(dest_folder.GetSelfLink().href)
self.doclist.Delete(dest_folder.GetEditLink().href)
uri = 'http://docs.google.com/feeds/documents/private/full'
uri += '/-/folder?q=%s&showfolders=true' % (CREATED_FOLDER_NAME,)
folders = self.doclist.GetDocumentListFeed(uri)
self.doclist.Delete(folders.entry[0].GetEditLink().href)
class DocumentListMoveInAndOutOfFolderTest(unittest.TestCase):
def setUp(self):
self.doclist = client
self.folder_name = 'TestFolder'
self.folder = self.doclist.CreateFolder(self.folder_name)
self.doc_title = 'TestDoc'
self.ms = gdata.MediaSource(file_path='test.doc',
content_type='application/msword')
def tearDown(self):
folder = self.doclist.Get(self.folder.GetSelfLink().href)
self.doclist.Delete(folder.GetEditLink().href)
def testUploadDocumentToFolder(self):
created_entry = self.doclist.UploadDocument(self.ms, self.doc_title,
self.folder)
for category in created_entry.category:
if category.scheme.startswith(gdata.docs.service.FOLDERS_SCHEME_PREFIX):
self.assertEqual(category.label, self.folder_name)
break
# delete the doc we created
created_entry = self.doclist.Get(created_entry.GetSelfLink().href)
match = re.search('\/(document%3A[^\/]*)\/?.*?\/(.*)$',
created_entry.GetEditLink().href)
edit_uri = 'http://docs.google.com/feeds/documents/private/full/'
edit_uri += '%s/%s' % (match.group(1), match.group(2))
self.doclist.Delete(edit_uri)
def testMoveDocumentInAndOutOfFolder(self):
created_entry = self.doclist.UploadDocument(self.ms, self.doc_title)
moved_entry = self.doclist.MoveDocumentIntoFolder(created_entry,
self.folder)
for category in moved_entry.category:
if category.scheme.startswith(gdata.docs.service.FOLDERS_SCHEME_PREFIX):
self.assertEqual(category.label, self.folder_name)
break
self.doclist.MoveOutOfFolder(moved_entry)
moved_entry = self.doclist.Get(moved_entry.GetSelfLink().href)
for category in moved_entry.category:
      starts_with_folder_prefix = category.scheme.startswith(
          gdata.docs.service.FOLDERS_SCHEME_PREFIX)
      self.assert_(not starts_with_folder_prefix)
created_entry = self.doclist.Get(created_entry.GetSelfLink().href)
self.doclist.Delete(created_entry.GetEditLink().href)
def testMoveFolderIntoFolder(self):
dest_folder_name = 'DestFolderName'
dest_folder = self.doclist.CreateFolder(dest_folder_name)
self.doclist.MoveFolderIntoFolder(self.folder, dest_folder)
self.folder = self.doclist.Get(self.folder.GetSelfLink().href)
folder_was_moved = False
for category in self.folder.category:
if category.term == dest_folder_name:
folder_was_moved = True
break
self.assert_(folder_was_moved)
#cleanup
dest_folder = self.doclist.Get(dest_folder.GetSelfLink().href)
self.doclist.Delete(dest_folder.GetEditLink().href)
class DocumentListUploadTest(unittest.TestCase):
def setUp(self):
self.doclist = client
def testUploadAndDeleteDocument(self):
ms = gdata.MediaSource(file_path='test.doc',
content_type='application/msword')
entry = self.doclist.UploadDocument(ms, 'test doc')
self.assertEqual(entry.title.text, 'test doc')
self.assertEqual(entry.category[0].label, 'document')
self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
self.doclist.Delete(entry.GetEditLink().href)
def testUploadAndDeletePresentation(self):
ms = gdata.MediaSource(file_path='test.ppt',
content_type='application/vnd.ms-powerpoint')
entry = self.doclist.UploadPresentation(ms, 'test preso')
self.assertEqual(entry.title.text, 'test preso')
self.assertEqual(entry.category[0].label, 'presentation')
self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
self.doclist.Delete(entry.GetEditLink().href)
def testUploadAndDeleteSpreadsheet(self):
ms = gdata.MediaSource(file_path='test.csv',
content_type='text/csv')
entry = self.doclist.UploadSpreadsheet(ms, 'test spreadsheet')
self.assert_(entry.title.text == 'test spreadsheet')
self.assertEqual(entry.category[0].label, 'spreadsheet')
self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
self.doclist.Delete(entry.GetEditLink().href)
class DocumentListUpdateTest(unittest.TestCase):
def setUp(self):
self.doclist = client
self.TITLE = 'CreatedTestDoc'
new_entry = gdata.docs.DocumentListEntry()
new_entry.title = gdata.atom.Title(text=self.TITLE)
new_entry.category.append(
gdata.atom.Category(scheme=gdata.docs.service.DATA_KIND_SCHEME,
term=gdata.docs.service.DOCUMENT_KIND_TERM,
label='document'))
self.created_entry = self.doclist.Post(new_entry,
'/feeds/documents/private/full')
def tearDown(self):
# Delete the test doc we created
self_link = self.created_entry.GetSelfLink().href
entry = self.doclist.GetDocumentListEntry(self_link)
self.doclist.Delete(entry.GetEditLink().href)
def testUpdateDocumentMetadataAndContent(self):
title = 'UpdatedTestDoc'
# Update metadata
self.created_entry.title.text = title
updated_entry = self.doclist.Put(self.created_entry,
self.created_entry.GetEditLink().href)
self.assertEqual(updated_entry.title.text, title)
# Update document's content
ms = gdata.MediaSource(file_path='test.doc',
content_type='application/msword')
uri = updated_entry.GetEditMediaLink().href
updated_entry = self.doclist.Put(ms, uri)
self.assertEqual(updated_entry.title.text, title)
# Append content to document
data = 'data to append'
ms = gdata.MediaSource(file_handle=StringIO.StringIO(data),
content_type='text/plain',
content_length=len(data))
uri = updated_entry.GetEditMediaLink().href + '?append=true'
updated_entry = self.doclist.Put(ms, uri)
class DocumentListExportTest(unittest.TestCase):
def setUp(self):
self.doclist = client
self.spreadsheets = spreadsheets
def testExportDocument(self):
query = ('http://docs.google.com/feeds/documents/private/full'
'/-/document?max-results=1')
feed = self.doclist.QueryDocumentListFeed(query)
file_paths = ['./downloadedTest.doc', './downloadedTest.html',
'./downloadedTest.odt', './downloadedTest.pdf',
'./downloadedTest.png', './downloadedTest.rtf',
'./downloadedTest.txt', './downloadedTest.zip']
for path in file_paths:
self.doclist.DownloadDocument(feed.entry[0], path)
self.assert_(os.path.exists(path))
self.assert_(os.path.getsize(path))
os.remove(path)
def testExportPresentation(self):
query = ('http://docs.google.com/feeds/documents/private/full'
'/-/presentation?max-results=1')
feed = self.doclist.QueryDocumentListFeed(query)
file_paths = ['./downloadedTest.pdf', './downloadedTest.ppt',
'./downloadedTest.swf', './downloadedTest.txt']
for path in file_paths:
self.doclist.DownloadPresentation(feed.entry[0].resourceId.text, path)
self.assert_(os.path.exists(path))
self.assert_(os.path.getsize(path))
os.remove(path)
def testExportSpreadsheet(self):
query = ('http://docs.google.com/feeds/documents/private/full'
'/-/spreadsheet?max-results=1')
feed = self.doclist.QueryDocumentListFeed(query)
file_paths = ['./downloadedTest.xls', './downloadedTest.csv',
'./downloadedTest.pdf', './downloadedTest.ods',
'./downloadedTest.tsv', './downloadedTest.html']
docs_token = self.doclist.GetClientLoginToken()
self.doclist.SetClientLoginToken(self.spreadsheets.GetClientLoginToken())
for path in file_paths:
self.doclist.DownloadSpreadsheet(feed.entry[0], path)
self.assert_(os.path.exists(path))
self.assert_(os.path.getsize(path))
os.remove(path)
self.doclist.SetClientLoginToken(docs_token)
def testExportNonExistentDocument(self):
path = './ned.txt'
exception_raised = False
try:
self.doclist.DownloadDocument('non_existent_doc', path)
except Exception, e: # expected
exception_raised = True
self.assert_(exception_raised)
self.assert_(not os.path.exists(path))
if __name__ == '__main__':
print ('DocList API Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.')
username = raw_input('Please enter your username: ')
password = getpass.getpass()
client = gdata.docs.service.DocsService()
spreadsheets = gdata.spreadsheet.service.SpreadsheetsService()
client.ClientLogin(username, password,
source='Document List Client Unit Tests')
spreadsheets.ClientLogin(username, password,
source='Document List Client Unit Tests')
unittest.main()
|
|
import abc
from typing import Union
from . import xdr as stellar_xdr
from .exceptions import MemoInvalidException
from .type_checked import type_checked
from .utils import hex_to_bytes
__all__ = ["Memo", "NoneMemo", "TextMemo", "IdMemo", "HashMemo", "ReturnHashMemo"]
@type_checked
class Memo(object, metaclass=abc.ABCMeta):
"""The :class:`Memo` object, which represents the base class for memos for
use with Stellar transactions.
The memo for a transaction contains optional extra information about the
transaction taking place. It is the responsibility of the client to
interpret this value.
See the following implementations that serve a more practical use with the
library:
* :class:`NoneMemo` - No memo.
* :class:`TextMemo` - A string encoded using either ASCII or UTF-8, up to 28-bytes long.
* :class:`IdMemo` - A 64 bit unsigned integer.
* :class:`HashMemo` - A 32 byte hash.
    * :class:`ReturnHashMemo` - A 32 byte hash intended to be interpreted as the hash of the transaction the sender is refunding.
See `Stellar's documentation on Transactions
<https://developers.stellar.org/docs/glossary/transactions/#memo>`__
for more information on how memos are used within transactions, as well as
information on the available types of memos.
"""
@abc.abstractmethod
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`Memo`."""
@staticmethod
def from_xdr_object(xdr_object: stellar_xdr.Memo) -> "Memo":
"""Returns an Memo object from XDR memo object."""
xdr_types = {
stellar_xdr.MemoType.MEMO_TEXT: TextMemo,
stellar_xdr.MemoType.MEMO_ID: IdMemo,
stellar_xdr.MemoType.MEMO_HASH: HashMemo,
stellar_xdr.MemoType.MEMO_RETURN: ReturnHashMemo,
stellar_xdr.MemoType.MEMO_NONE: NoneMemo,
}
# TODO: Maybe we should raise Key Error here
memo_cls = xdr_types.get(xdr_object.type, NoneMemo)
return memo_cls.from_xdr_object(xdr_object) # type: ignore[attr-defined]
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
pass # pragma: no cover
@type_checked
class NoneMemo(Memo):
"""The :class:`NoneMemo`, which represents no memo for a transaction."""
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "NoneMemo":
"""Returns an :class:`NoneMemo` object from XDR memo object."""
return cls()
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`NoneMemo`."""
return stellar_xdr.Memo(type=stellar_xdr.MemoType.MEMO_NONE)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return True
def __str__(self):
return "<NoneMemo>"
@type_checked
class TextMemo(Memo):
"""The :class:`TextMemo`, which represents ``MEMO_TEXT`` in a transaction.
:param text: A string encoded using either ASCII or UTF-8, up to
28-bytes long. Note, `text` can be anything,
see `this issue <https://github.com/stellar/new-docs/issues/555>`__ for more information.
:raises: :exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``text`` is not a valid text memo.
"""
def __init__(self, text: Union[str, bytes]) -> None:
if not isinstance(text, bytes):
text = bytes(text, encoding="utf-8")
self.memo_text: bytes = text
length = len(self.memo_text)
if length > 28:
raise MemoInvalidException(
f"Text should be <= 28 bytes (ascii encoded), got {length} bytes."
)
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "TextMemo":
"""Returns an :class:`TextMemo` object from XDR memo object."""
assert xdr_object.text is not None
return cls(bytes(xdr_object.text))
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`TextMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_TEXT, text=self.memo_text
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.memo_text == other.memo_text
def __str__(self):
return f"<TextMemo [memo={self.memo_text}]>"
@type_checked
class IdMemo(Memo):
"""The :class:`IdMemo` which represents ``MEMO_ID`` in a transaction.
:param memo_id: A 64 bit unsigned integer.
:raises:
:exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``id`` is not a valid id memo.
"""
def __init__(self, memo_id: int) -> None:
if memo_id < 0 or memo_id > 2 ** 64 - 1:
raise MemoInvalidException(
"IdMemo is an unsigned 64-bit integer and the max valid value is 18446744073709551615."
)
self.memo_id: int = memo_id
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "IdMemo":
"""Returns an :class:`IdMemo` object from XDR memo object."""
assert xdr_object.id is not None
return cls(xdr_object.id.uint64)
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`IdMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_ID, id=stellar_xdr.Uint64(self.memo_id)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.memo_id == other.memo_id
def __str__(self):
return f"<IdMemo [memo={self.memo_id}]>"
@type_checked
class HashMemo(Memo):
"""The :class:`HashMemo` which represents ``MEMO_HASH`` in a transaction.
    :param memo_hash: A 32 byte hash or hex encoded string.
:raises: :exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``memo_hash`` is not a valid hash memo.
"""
def __init__(self, memo_hash: Union[bytes, str]) -> None:
memo_hash = hex_to_bytes(memo_hash)
length = len(memo_hash)
if length != 32:
raise MemoInvalidException(
f"The length of HashMemo should be 32 bytes, got {length} bytes."
)
self.memo_hash: bytes = memo_hash
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "HashMemo":
"""Returns an :class:`HashMemo` object from XDR memo object."""
assert xdr_object.hash is not None
return cls(xdr_object.hash.hash)
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`HashMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_HASH, hash=stellar_xdr.Hash(self.memo_hash)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.memo_hash == other.memo_hash
def __str__(self):
return f"<HashMemo [memo={self.memo_hash}]>"
@type_checked
class ReturnHashMemo(Memo):
"""The :class:`ReturnHashMemo` which represents ``MEMO_RETURN`` in a transaction.
MEMO_RETURN is typically used with refunds/returns over the network - it is
a 32 byte hash intended to be interpreted as the hash of the transaction
the sender is refunding.
:param memo_return: A 32 byte hash or hex encoded string intended to be interpreted as the
hash of the transaction the sender is refunding.
:raises: :exc:`MemoInvalidException <stellar_sdk.exceptions.MemoInvalidException>`:
if ``memo_return`` is not a valid return hash memo.
"""
def __init__(self, memo_return: Union[bytes, str]) -> None:
memo_return = hex_to_bytes(memo_return)
length = len(memo_return)
if length != 32:
raise MemoInvalidException(
f"The length of ReturnHashMemo should be 32 bytes, got {length} bytes."
)
self.memo_return: bytes = memo_return
@classmethod
def from_xdr_object(cls, xdr_object: stellar_xdr.Memo) -> "ReturnHashMemo":
"""Returns an :class:`ReturnHashMemo` object from XDR memo object."""
assert xdr_object.ret_hash is not None
return cls(xdr_object.ret_hash.hash)
def to_xdr_object(self) -> stellar_xdr.Memo:
"""Creates an XDR Memo object that represents this :class:`ReturnHashMemo`."""
return stellar_xdr.Memo(
type=stellar_xdr.MemoType.MEMO_RETURN,
ret_hash=stellar_xdr.Hash(self.memo_return),
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return self.memo_return == other.memo_return
def __str__(self):
return f"<ReturnHashMemo [memo={self.memo_return}]>"
|
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows specific tests. These are implicitly run by test_psutil.py."""
import errno
import os
import platform
import signal
import subprocess
import sys
import time
import traceback
from test_psutil import (get_test_subprocess, reap_children, unittest)
try:
import wmi
except ImportError:
wmi = None
try:
import win32api
import win32con
except ImportError:
win32api = win32con = None
from psutil._compat import PY3, callable, long
from psutil._pswindows import ACCESS_DENIED_SET
import _psutil_windows
import psutil
def wrap_exceptions(fun):
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
if err.errno in ACCESS_DENIED_SET:
raise psutil.AccessDenied(None, None)
if err.errno == errno.ESRCH:
raise psutil.NoSuchProcess(None, None)
raise
return wrapper
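# Illustrative sketch (not part of the original test suite): wrap_exceptions is
# applied to the raw _psutil_windows functions, as done in
# TestDualProcessImplementation below, so that Windows error codes surface as
# psutil exceptions rather than bare OSErrors. 'proc_create_time' is one of the
# names those tests exercise.
def _example_wrapped_call(pid):  # pragma: no cover
    wrapped = wrap_exceptions(getattr(_psutil_windows, 'proc_create_time'))
    # Raises psutil.NoSuchProcess / psutil.AccessDenied instead of OSError.
    return wrapped(pid)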
class WindowsSpecificTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_issue_24(self):
p = psutil.Process(0)
self.assertRaises(psutil.AccessDenied, p.kill)
def test_special_pid(self):
p = psutil.Process(4)
self.assertEqual(p.name(), 'System')
# use __str__ to access all common Process properties to check
# that nothing strange happens
str(p)
p.username()
self.assertTrue(p.create_time() >= 0.0)
try:
rss, vms = p.memory_info()
except psutil.AccessDenied:
# expected on Windows Vista and Windows 7
if not platform.uname()[1] in ('vista', 'win-7', 'win7'):
raise
else:
self.assertTrue(rss > 0)
def test_send_signal(self):
p = psutil.Process(self.pid)
self.assertRaises(ValueError, p.send_signal, signal.SIGINT)
def test_nic_names(self):
p = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE)
out = p.communicate()[0]
if PY3:
out = str(out, sys.stdout.encoding)
nics = psutil.net_io_counters(pernic=True).keys()
for nic in nics:
if "pseudo-interface" in nic.replace(' ', '-').lower():
continue
if nic not in out:
self.fail(
"%r nic wasn't found in 'ipconfig /all' output" % nic)
def test_exe(self):
for p in psutil.process_iter():
try:
self.assertEqual(os.path.basename(p.exe()), p.name())
except psutil.Error:
pass
# --- Process class tests
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_name(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(p.name(), w.Caption)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_exe(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(p.exe(), w.ExecutablePath)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_cmdline(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(' '.join(p.cmdline()),
w.CommandLine.replace('"', ''))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_username(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
domain, _, username = w.GetOwner()
username = "%s\\%s" % (domain, username)
self.assertEqual(p.username(), username)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_rss_memory(self):
time.sleep(0.1)
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
rss = p.memory_info().rss
self.assertEqual(rss, int(w.WorkingSetSize))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_vms_memory(self):
time.sleep(0.1)
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
vms = p.memory_info().vms
# http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx
# ...claims that PageFileUsage is represented in Kilo
# bytes but funnily enough on certain platforms bytes are
# returned instead.
wmi_usage = int(w.PageFileUsage)
if (vms != wmi_usage) and (vms != wmi_usage * 1024):
self.fail("wmi=%s, psutil=%s" % (wmi_usage, vms))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_create_time(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
wmic_create = str(w.CreationDate.split('.')[0])
psutil_create = time.strftime("%Y%m%d%H%M%S",
time.localtime(p.create_time()))
self.assertEqual(wmic_create, psutil_create)
# --- psutil namespace functions and constants tests
@unittest.skipUnless(hasattr(os, 'NUMBER_OF_PROCESSORS'),
'NUMBER_OF_PROCESSORS env var is not available')
def test_cpu_count(self):
num_cpus = int(os.environ['NUMBER_OF_PROCESSORS'])
self.assertEqual(num_cpus, psutil.cpu_count())
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_total_phymem(self):
w = wmi.WMI().Win32_ComputerSystem()[0]
self.assertEqual(int(w.TotalPhysicalMemory),
psutil.virtual_memory().total)
# @unittest.skipIf(wmi is None, "wmi module is not installed")
# def test__UPTIME(self):
# # _UPTIME constant is not public but it is used internally
# # as value to return for pid 0 creation time.
# # WMI behaves the same.
# w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
# p = psutil.Process(0)
# wmic_create = str(w.CreationDate.split('.')[0])
# psutil_create = time.strftime("%Y%m%d%H%M%S",
# time.localtime(p.create_time()))
#
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_pids(self):
# Note: this test might fail if the OS is starting/killing
# other processes in the meantime
w = wmi.WMI().Win32_Process()
wmi_pids = [x.ProcessId for x in w]
wmi_pids.sort()
psutil_pids = psutil.pids()
psutil_pids.sort()
if wmi_pids != psutil_pids:
            difference = (
                [x for x in psutil_pids if x not in wmi_pids] +
                [x for x in wmi_pids if x not in psutil_pids])
self.fail("difference: " + str(difference))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_disks(self):
ps_parts = psutil.disk_partitions(all=True)
wmi_parts = wmi.WMI().Win32_LogicalDisk()
for ps_part in ps_parts:
for wmi_part in wmi_parts:
if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
if not ps_part.mountpoint:
# this is usually a CD-ROM with no disk inserted
break
try:
usage = psutil.disk_usage(ps_part.mountpoint)
except OSError as err:
if err.errno == errno.ENOENT:
# usually this is the floppy
break
else:
raise
self.assertEqual(usage.total, int(wmi_part.Size))
wmi_free = int(wmi_part.FreeSpace)
self.assertEqual(usage.free, wmi_free)
                    # 10 MB tolerance
if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
self.fail("psutil=%s, wmi=%s" % (
usage.free, wmi_free))
break
else:
self.fail("can't find partition %s" % repr(ps_part))
@unittest.skipIf(win32api is None, "pywin32 module is not installed")
def test_num_handles(self):
p = psutil.Process(os.getpid())
before = p.num_handles()
handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
win32con.FALSE, os.getpid())
after = p.num_handles()
self.assertEqual(after, before + 1)
win32api.CloseHandle(handle)
self.assertEqual(p.num_handles(), before)
@unittest.skipIf(win32api is None, "pywin32 module is not installed")
def test_num_handles_2(self):
        # Note: this fails from time to time; I'm inclined to think
        # it doesn't mean something is broken
def call(p, attr):
            attr = getattr(p, attr, None)
if attr is not None and callable(attr):
attr()
else:
attr
p = psutil.Process(self.pid)
failures = []
for name in dir(psutil.Process):
if name.startswith('_') \
or name.startswith('set_') \
or name.startswith('get') \
or name in ('terminate', 'kill', 'suspend', 'resume',
'nice', 'send_signal', 'wait', 'children',
'as_dict'):
continue
else:
try:
call(p, name)
num1 = p.num_handles()
call(p, name)
num2 = p.num_handles()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
else:
if num2 > num1:
fail = \
"failure while processing Process.%s method " \
"(before=%s, after=%s)" % (name, num1, num2)
failures.append(fail)
if failures:
self.fail('\n' + '\n'.join(failures))
class TestDualProcessImplementation(unittest.TestCase):
fun_names = [
# function name, tolerance
('proc_cpu_times', 0.2),
('proc_create_time', 0.5),
('proc_num_handles', 1), # 1 because impl #1 opens a handle
('proc_io_counters', 0),
('proc_memory_info', 1024), # KB
]
def test_compare_values(self):
# Certain APIs on Windows have 2 internal implementations, one
# based on documented Windows APIs, another one based
# NtQuerySystemInformation() which gets called as fallback in
# case the first fails because of limited permission error.
# Here we test that the two methods return the exact same value,
# see:
# https://github.com/giampaolo/psutil/issues/304
def assert_ge_0(obj):
if isinstance(obj, tuple):
for value in obj:
self.assertGreaterEqual(value, 0)
elif isinstance(obj, (int, long, float)):
self.assertGreaterEqual(obj, 0)
else:
assert 0 # case not handled which needs to be fixed
def compare_with_tolerance(ret1, ret2, tolerance):
if ret1 == ret2:
return
else:
if isinstance(ret2, (int, long, float)):
diff = abs(ret1 - ret2)
self.assertLessEqual(diff, tolerance)
elif isinstance(ret2, tuple):
for a, b in zip(ret1, ret2):
diff = abs(a - b)
self.assertLessEqual(diff, tolerance)
failures = []
for name, tolerance in self.fun_names:
meth1 = wrap_exceptions(getattr(_psutil_windows, name))
meth2 = wrap_exceptions(getattr(_psutil_windows, name + '_2'))
for p in psutil.process_iter():
if name == 'proc_memory_info' and p.pid == os.getpid():
continue
#
try:
ret1 = meth1(p.pid)
except psutil.NoSuchProcess:
continue
except psutil.AccessDenied:
ret1 = None
#
try:
ret2 = meth2(p.pid)
except psutil.NoSuchProcess:
# this is supposed to fail only in case of zombie process
# never for permission error
continue
# compare values
try:
if ret1 is None:
assert_ge_0(ret2)
else:
compare_with_tolerance(ret1, ret2, tolerance)
assert_ge_0(ret1)
assert_ge_0(ret2)
except AssertionError:
trace = traceback.format_exc()
msg = '%s\npid=%s, method=%r, ret_1=%r, ret_2=%r' % (
trace, p.pid, name, ret1, ret2)
failures.append(msg)
break
if failures:
self.fail('\n\n'.join(failures))
def test_zombies(self):
# test that NPS is raised by the 2nd implementation in case a
# process no longer exists
ZOMBIE_PID = max(psutil.pids()) + 5000
for name, _ in self.fun_names:
meth = wrap_exceptions(getattr(_psutil_windows, name))
self.assertRaises(psutil.NoSuchProcess, meth, ZOMBIE_PID)
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(WindowsSpecificTestCase))
test_suite.addTest(unittest.makeSuite(TestDualProcessImplementation))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not test_main():
sys.exit(1)
|
|
import logging
import os
import binascii
import bcrypt
import json
import datetime
import urllib
import database
import socket
import time
import threading
import tornado.web
import tornado.websocket
import tornado.escape
from tornado import gen, web, httpclient
from tornado.gen import Return
from models import DeviceModel
from models import SensorValue
from models import UploadedImage
from models import SensorFilter
class RedirectorHandler(tornado.web.RequestHandler):
def initialize(self, manager):
self.manager = manager
def get(self):
host = self.request.host
host = host.split(':')[0]
if self.manager.httpsPort != 443:
host += ":{0}".format(self.manager.httpsPort)
redirectTo = "https://{0}".format(host)
self.redirect(redirectTo)
class BaseWebHandler(tornado.web.RequestHandler):
def isAuthenticated(self):
user = self.get_secure_cookie("user", max_age_days=1)
if user:
return True
else:
return False
class AuthFileHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, path):
self.path = path
def get(self, file):
if self.isAuthenticated():
if file.find("..") > -1:
return
fullPath = os.path.join(self.path, file)
if not os.path.exists(fullPath):
self.set_status(404)
self.write("404 Not Found")
return
ext = file.split('.')[-1]
contentType = "application/octet-stream"
if ext == "jpg" or ext== "jpeg" or ext == "bmp":
contentType = "image/{0}".format(ext)
self.logger.debug("serving file {0}".format(fullPath))
with open(fullPath, mode='rb') as file:
fileData = file.read()
self.write(fileData)
self.set_header("Content-Type", contentType)
else:
self.redirect("/login?"+urllib.urlencode({"returnUrl":self.request.uri}))
class VideoWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, localVideoPort):
self.localVideoPort = localVideoPort
def get(self):
if self.isAuthenticated():
self.logger.info("Attempting to stream video from 127.0.0.1:{0}".format(self.localVideoPort))
self.clear()
self.set_status(200)
self.set_header('Connection', 'close')
self.set_header('Max-Age', '0')
self.set_header('Expires', '0')
self.set_header('Cache-Control', 'no-cache, private')
self.set_header('Pragma', 'no-cache')
self.set_header('Content-type','multipart/x-mixed-replace; boundary=--BoundaryString')
self.flush()
self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.sock.connect(('127.0.0.1', self.localVideoPort))
self.sock.sendall("GET http://127.0.0.1:{0}/ HTTP/1.1\r\nHost: 127.0.0.1:{0}\r\n\r\n".format(self.localVideoPort))
#read headers from mjpg stream
line = self.readLine()
while len(line) > 0:
self.logger.debug("header line from video server: {0}".format(line))
line = self.readLine()
#stream video
self.logger.info("Starting serving mjpg stream")
            self._auto_finish = False
threading.Thread(target = self.streamVideo).start()
else:
self.redirect("/login?"+urllib.urlencode({"returnUrl":self.request.uri}))
def streamVideo(self):
cont = True
while cont:
buffer = self.sock.recv(100000)
if len(buffer) > 0:
self.logger.debug("received {0} bytes of video stream".format(len(buffer)))
self.write(buffer)
self.set_header('Content-type','Content-type: image/jpeg')
self.flush()
else:
cont = False
def readLine(self):
c1 = None
c2 = None
line = ""
while c1 != '\r' and c2 != '\n':
buf = self.sock.recv(1)
if len(buf) > 0:
c1 = c2
c2 = buf[0]
line += buf
return line[:-2]
class HomeWebHandler(BaseWebHandler):
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self):
if self.isAuthenticated():
devices = self.iotManager.getOnlineDevices()
self.render("views/home.html", devices=devices)
else:
self.redirect("/login")
class DevicesWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self):
if self.isAuthenticated():
devices = self.iotManager.getAllDevices()
self.render("views/devices.html", devices=devices)
else:
self.redirect("/login?"+urllib.urlencode({"returnUrl":self.request.uri}))
class RssWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self):
devices = self.iotManager.getOnlineDevices()
xml = self.render_string("views/rss.xml", devices=devices)
self.set_header('Content-Type', 'text/xml')
self.finish(xml)
class ApiWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self):
        user = self.getUser()
if user == "admin":
onlinedevices = self.iotManager.getOnlineDevices()
dev = [{'id':d.deviceId, 'values':[{'id':v.id, 'value':v.value, 'label':v.label} for v in d.values]} for d in onlinedevices]
response = { 'devices': dev }
self.set_header('Content-Type', 'application/json')
self.finish(json.dumps(response))
else:
self.logger.warning("Unauthorized API connection from {0}!".format(self.request.remote_ip))
self.clear()
self.set_status(403)
self.set_header('Content-Type', 'application/json')
            self.finish(json.dumps({'error': 123, 'message': 'forbidden'}))
try:
self.close()
except:
pass
def getUser(self):
user = self.get_secure_cookie("user", max_age_days=1)
if user is None:
secret = self.get_argument("secret", None)
if secret == self.iotManager.apiSecret:
user = "admin"
else:
self.logger.warning("Invalid secret when calling WS api from {0}".format(self.request.remote_ip))
return user
class DeviceWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self, deviceIdHex):
if self.isAuthenticated():
imagesCount = int(tornado.escape.xhtml_escape(self.get_argument("images", "6")))
deviceModel = self.iotManager.getDevice(deviceIdHex, imagesCount)
if deviceModel:
self.render("views/device.html", device = deviceModel, imagesCount=imagesCount)
else:
self.logger.warning("device {0} not found".format(deviceIdHex))
else:
self.redirect("/login?"+urllib.urlencode({"returnUrl":self.request.uri}))
class HistoryWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self):
if self.isAuthenticated():
fromTime = tornado.escape.xhtml_escape(self.get_argument("fromTime", (datetime.datetime.now() - datetime.timedelta(days=2)).strftime('%Y-%m-%d')))
toTime = tornado.escape.xhtml_escape(self.get_argument("toTime", (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')))
aggregation = tornado.escape.xhtml_escape(self.get_argument("aggregation", "minutes"))
sensors = []
chartData = []
chartSensors = []
showChart = False
for deviceId, conf in self.iotManager.deviceConfig.items():
if "values" in conf:
for id, varConf in conf["values"].items():
parameterName = "{0}.{1}".format(deviceId, id)
selected = self.get_argument(parameterName, default=None)
sensorObj = SensorFilter(deviceId, conf["name"], id, varConf.get("label", id), varConf.get("type", "number"), selected)
sensors.append(sensorObj)
if selected:
showChart = True
chartSensors.append(sensorObj)
fromTimeParsed = datetime.datetime.strptime(fromTime, '%Y-%m-%d')
toTimeParsed = datetime.datetime.strptime(toTime, '%Y-%m-%d')
if showChart:
self.logger.debug("Showing chart for period {0} - {1} aggregated to {2} for sensors {3}".format(fromTimeParsed, toTimeParsed, aggregation, chartSensors))
chartData = self.iotManager.database.getChartData(chartSensors, fromTimeParsed, toTimeParsed, aggregation)
finalChartSensors = []
for sensor in chartSensors:
                    if any(sensor.fullId in record for record in chartData):
finalChartSensors.append(sensor)
chartSensors = finalChartSensors
self.render("views/history.html", sensors=sensors, fromTime=fromTime, toTime=toTime, aggregation=aggregation, showChart=showChart, chartData=chartData, chartSensors=chartSensors)
else:
self.redirect("/login?"+urllib.urlencode({"returnUrl":self.request.uri}))
class LogsWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, iotManager):
self.iotManager = iotManager
def get(self, deviceIdHex):
self.logger.info("LogsWebHandler 0")
if self.isAuthenticated():
self.logger.info("LogsWebHandler 1")
fromTime = tornado.escape.xhtml_escape(self.get_argument("fromTime", (datetime.datetime.now() - datetime.timedelta(days=2)).strftime('%Y-%m-%d')))
toTime = tornado.escape.xhtml_escape(self.get_argument("toTime", (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')))
self.logger.info("LogsWebHandler 2")
deviceModel = self.iotManager.getDevice(deviceIdHex, 1)
fromTimeParsed = datetime.datetime.strptime(fromTime, '%Y-%m-%d')
toTimeParsed = datetime.datetime.strptime(toTime, '%Y-%m-%d')
self.logger.info("LogsWebHandler 3")
logData = self.iotManager.database.getLogData(deviceIdHex, fromTimeParsed, toTimeParsed)
self.render("views/logs.html", fromTime=fromTime, toTime=toTime, logData=logData, device = deviceModel)
class LoginWebHandler(BaseWebHandler):
logger = logging.getLogger()
def initialize(self, adminPasswordHash):
self.adminPasswordHash = adminPasswordHash
def get(self):
returnUrl = self.get_argument("returnUrl", "/")
self.render("views/login.html", returnUrl=returnUrl)
def post(self):
username = tornado.escape.xhtml_escape(self.get_argument("username", ""))
password = tornado.escape.xhtml_escape(self.get_argument("password", "")).encode('utf-8')
returnUrl = self.get_argument("returnUrl", "/")
self.logger.info("login request with username={0} from ip={1}".format(username, self.request.remote_ip))
if username == "admin" and bcrypt.hashpw(password, self.adminPasswordHash) == self.adminPasswordHash:
self.set_secure_cookie("user", username, expires_days=1)
self.redirect(returnUrl)
else:
self.logger.warning("Invalid login/password request with username={0} from ip={1}".format(username, self.request.remote_ip))
self.render("views/login.html", errormsg="Invalid username or password.", returnUrl=returnUrl)
class LogoutWebHandler(BaseWebHandler):
def get(self):
self.clear_cookie("user")
self.redirect("/")
class WSHandler(tornado.websocket.WebSocketHandler):
logger = logging.getLogger()
connections = set()
def initialize(self, iotManager):
self.iotManager = iotManager
@staticmethod
def sendMessageToAll(msg):
for con in set(WSHandler.connections):
try:
con.write_message(msg)
except:
pass
def open(self):
self.logger.debug('WS New connection was opened from {0}'.format(self.request.remote_ip))
user = self.getUser()
if user == "admin":
self.connections.add(self)
else:
self.logger.warning("Unauthorized WS connection from {0}!".format(self.request.remote_ip))
self.write_message(json.dumps({'error':123}))
try:
self.close()
except:
pass
def on_message(self, message):
self.logger.debug('WS Incoming message:{0} from {1}'.format(message, self.request.remote_ip))
        user = self.getUser()
if user == "admin":
parsed = json.loads(message)
if "command" in parsed and "deviceId" in parsed:
self.iotManager.sendCommand(parsed["deviceId"], parsed["command"])
else:
try:
self.close()
except:
pass
def on_close(self):
try:
self.connections.remove(self)
except:
pass
self.logger.debug('WS Connection from {0} was closed.'.format(self.request.remote_ip))
def check_origin(self, origin):
self.logger.debug('WS connection origin check: {0}'.format(origin))
return True
def getUser(self):
user = self.get_secure_cookie("user", max_age_days=1)
if user is None:
secret = self.get_argument("secret", None)
if secret == self.iotManager.apiSecret:
user = "admin"
else:
self.logger.warning("Invalid secret when calling WS api from {0}".format(self.request.remote_ip))
return user
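# ---------------------------------------------------------------------------
# Minimal wiring sketch (illustrative only): one way these handlers could be
# mounted in a tornado Application. The route paths and the iot_manager /
# admin_hash / files_path arguments are hypothetical stand-ins; the real
# application setup lives elsewhere in the project.
# ---------------------------------------------------------------------------
def exampleMakeApp(iot_manager, admin_hash, files_path):  # pragma: no cover
    return tornado.web.Application(
        [
            (r"/", HomeWebHandler, dict(iotManager=iot_manager)),
            (r"/devices", DevicesWebHandler, dict(iotManager=iot_manager)),
            (r"/device/(.+)", DeviceWebHandler, dict(iotManager=iot_manager)),
            (r"/history", HistoryWebHandler, dict(iotManager=iot_manager)),
            (r"/login", LoginWebHandler, dict(adminPasswordHash=admin_hash)),
            (r"/logout", LogoutWebHandler),
            (r"/ws", WSHandler, dict(iotManager=iot_manager)),
            (r"/files/(.+)", AuthFileHandler, dict(path=files_path)),
        ],
        cookie_secret=binascii.hexlify(os.urandom(32)),
    )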
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import six
from bson import json_util
from girder import events
from girder.constants import AccessType, SortDir
from girder.exceptions import ValidationException
from girder.models.model_base import AccessControlledModel
from girder.models.notification import Notification
from girder.models.token import Token
from girder.models.user import User
from ..constants import JobStatus, JOB_HANDLER_LOCAL
class Job(AccessControlledModel):
def initialize(self):
self.name = 'job'
compoundSearchIndex = (
('userId', SortDir.ASCENDING),
('created', SortDir.DESCENDING),
('type', SortDir.ASCENDING),
('status', SortDir.ASCENDING)
)
self.ensureIndices([(compoundSearchIndex, {}),
'created', 'parentId', 'celeryTaskId'])
self.exposeFields(level=AccessType.READ, fields={
'title', 'type', 'created', 'interval', 'when', 'status',
'progress', 'log', 'meta', '_id', 'public', 'parentId', 'async_',
'updated', 'timestamps', 'handler', 'jobInfoSpec'})
self.exposeFields(level=AccessType.SITE_ADMIN, fields={'args', 'kwargs'})
def validate(self, job):
self._validateStatus(job['status'])
return job
def _validateStatus(self, status):
if not JobStatus.isValid(status):
raise ValidationException(
'Invalid job status %s.' % status, field='status')
def _validateChild(self, parentJob, childJob):
if str(parentJob['_id']) == str(childJob['_id']):
raise ValidationException('Child Id cannot be equal to Parent Id')
if childJob['parentId']:
raise ValidationException('Cannot overwrite the Parent Id')
def list(self, user=None, types=None, statuses=None,
limit=0, offset=0, sort=None, currentUser=None, parentJob=None):
"""
List a page of jobs for a given user.
:param user: The user who owns the job.
:type user: dict, 'all', 'none', or None.
:param types: job type filter.
:type types: array of type string, or None.
:param statuses: job status filter.
:type statuses: array of status integer, or None.
        :param limit: The page limit.
:param offset: The page offset.
:param sort: The sort field.
:param parentJob: Parent Job.
:param currentUser: User for access filtering.
"""
return self.findWithPermissions(
offset=offset, limit=limit, sort=sort, user=currentUser,
types=types, statuses=statuses, jobUser=user, parentJob=parentJob)
def findWithPermissions(self, query=None, offset=0, limit=0, timeout=None, fields=None,
sort=None, user=None, level=AccessType.READ,
types=None, statuses=None, jobUser=None, parentJob=None, **kwargs):
"""
Search the list of jobs.
:param query: The search query (see general MongoDB docs for "find()")
:type query: dict
:param offset: The offset into the results
:type offset: int
:param limit: Maximum number of documents to return
:type limit: int
:param timeout: Cursor timeout in ms. Default is no timeout.
:type timeout: int
:param fields: A mask for filtering result documents by key, or None to return the full
document, passed to MongoDB find() as the `projection` param.
:type fields: `str, list of strings or tuple of strings for fields to be included from the
document, or dict for an inclusion or exclusion projection`.
:param sort: The sort order.
:type sort: List of (key, order) tuples.
:param user: The user to check policies against.
:type user: dict or None
:param level: The access level. Explicitly passing None skips doing
permissions checks.
:type level: AccessType
:param types: job type filter.
:type types: array of type string, or None.
:param statuses: job status filter.
:type statuses: array of status integer, or None.
:param jobUser: The user who owns the job.
:type jobUser: dict, 'all', 'none', or None.
:param parentJob: Parent Job.
:returns: A pymongo Cursor or CommandCursor. If a CommandCursor, it
has been augmented with a count function.
"""
if query is None:
query = {}
# When user is 'all', no filtering by user, list jobs of all users.
if jobUser == 'all':
pass
# When user is 'none' or None, list anonymous user jobs.
elif jobUser == 'none' or jobUser is None:
query['userId'] = None
# Otherwise, filter by user id
else:
query['userId'] = jobUser['_id']
if types is not None:
query['type'] = {'$in': types}
if statuses is not None:
query['status'] = {'$in': statuses}
if parentJob:
query['parentId'] = parentJob['_id']
return super(Job, self).findWithPermissions(
query, offset=offset, limit=limit, timeout=timeout, fields=fields,
sort=sort, user=user, level=level, **kwargs)
def listAll(self, limit=0, offset=0, sort=None, currentUser=None):
"""
List all jobs.
:param limit: The page limit.
:param offset: The page offset
:param sort: The sort field.
:param currentUser: User for access filtering.
        .. deprecated:: 2.3.0
Use :func:`job.list` instead
"""
return self.list(user='all', types=None, statuses=None, limit=limit,
offset=offset, sort=sort, currentUser=currentUser)
def cancelJob(self, job):
"""
Revoke/cancel a job. This simply triggers the jobs.cancel event and
sets the job status to CANCELED. If one of the event handlers
calls preventDefault() on the event, this job will *not* be put into
the CANCELED state.
:param job: The job to cancel.
"""
event = events.trigger('jobs.cancel', info=job)
if not event.defaultPrevented:
job = self.updateJob(job, status=JobStatus.CANCELED)
return job
def createLocalJob(self, module, function=None, **kwargs):
"""
Takes the same keyword arguments as :py:func:`createJob`, except this
sets the handler to the local handler and takes additional parameters
to specify the module and function that should be run.
:param module: The name of the python module to run.
:type module: str
:param function: Function name within the module to run. If not passed,
the default name of "run" will be used.
:type function: str or None
:returns: The job that was created.
"""
kwargs['handler'] = JOB_HANDLER_LOCAL
kwargs['save'] = False
job = self.createJob(**kwargs)
job['module'] = module
if function is not None:
job['function'] = function
return self.save(job)
@events._deprecatedAsync
def createJob(self, title, type, args=(), kwargs=None, user=None, when=None,
interval=0, public=False, handler=None, async_=False,
save=True, parentJob=None, otherFields=None):
"""
Create a new job record.
:param title: The title of the job.
:type title: str
:param type: The type of the job.
:type type: str
:param args: Positional args of the job payload.
:type args: list or tuple
:param kwargs: Keyword arguments of the job payload.
:type kwargs: dict
:param user: The user creating the job.
:type user: dict or None
:param when: Minimum start time for the job (UTC).
:type when: datetime
:param interval: If this job should be recurring, set this to a value
in seconds representing how often it should occur. Set to <= 0 for
jobs that should only be run once.
:type interval: int
:param public: Public read access flag.
:type public: bool
:param handler: If this job should be handled by a specific handler,
use this field to store that information.
:param async_: Whether the job is to be run asynchronously. For now this
only applies to jobs that are scheduled to run locally.
:type async_: bool
        :param save: Whether the document should be saved to the database.
:type save: bool
:param parentJob: The job which will be set as a parent
:type parentJob: Job
:param otherFields: Any additional fields to set on the job.
:type otherFields: dict
"""
now = datetime.datetime.utcnow()
if when is None:
when = now
if kwargs is None:
kwargs = {}
otherFields = otherFields or {}
parentId = None
if parentJob:
parentId = parentJob['_id']
job = {
'title': title,
'type': type,
'args': args,
'kwargs': kwargs,
'created': now,
'updated': now,
'when': when,
'interval': interval,
'status': JobStatus.INACTIVE,
'progress': None,
'log': [],
'meta': {},
'handler': handler,
'async_': async_,
'timestamps': [],
'parentId': parentId
}
job.update(otherFields)
self.setPublic(job, public=public)
if user:
job['userId'] = user['_id']
self.setUserAccess(job, user=user, level=AccessType.ADMIN)
else:
job['userId'] = None
if save:
job = self.save(job)
if user:
deserialized_kwargs = job['kwargs']
job['kwargs'] = json_util.dumps(job['kwargs'])
Notification().createNotification(
type='job_created', data=job, user=user,
expires=datetime.datetime.utcnow() + datetime.timedelta(seconds=30))
job['kwargs'] = deserialized_kwargs
return job
def save(self, job, *args, **kwargs):
"""
We extend save so that we can serialize the kwargs before sending them
to the database. This will allow kwargs with $ and . characters in the
keys.
"""
job['kwargs'] = json_util.dumps(job['kwargs'])
job = AccessControlledModel.save(self, job, *args, **kwargs)
job['kwargs'] = json_util.loads(job['kwargs'])
return job
def find(self, *args, **kwargs):
"""
Overrides the default find behavior to exclude the log by default.
:param includeLog: Whether to include the log field in the documents.
:type includeLog: bool
"""
kwargs['fields'] = self._computeFields(kwargs)
return super(Job, self).find(*args, **kwargs)
def load(self, *args, **kwargs):
"""
We extend load to deserialize the kwargs back into a dict since we
serialized them on the way into the database.
:param includeLog: Whether to include the log field in the document.
:type includeLog: bool
"""
kwargs['fields'] = self._computeFields(kwargs)
job = super(Job, self).load(*args, **kwargs)
if job and isinstance(job.get('kwargs'), six.string_types):
job['kwargs'] = json_util.loads(job['kwargs'])
if job and isinstance(job.get('log'), six.string_types):
# Legacy support: log used to be just a string, but we want to
# consistently return a list of strings now.
job['log'] = [job['log']]
return job
def scheduleJob(self, job):
"""
Trigger the event to schedule this job. Other plugins are in charge of
actually scheduling and/or executing the job, except in the case when
the handler is 'local'.
"""
if job.get('async_', job.get('async')) is True:
events.daemon.trigger('jobs.schedule', info=job)
else:
events.trigger('jobs.schedule', info=job)
def createJobToken(self, job, days=7):
"""
Create a token that can be used just for the management of an individual
job, e.g. updating job info, progress, logs, status.
"""
return Token().createToken(days=days, scope='jobs.job_' + str(job['_id']))
def updateJob(self, job, log=None, overwrite=False, status=None,
progressTotal=None, progressCurrent=None, notify=True,
progressMessage=None, otherFields=None):
"""
Update an existing job. Any of the updateable fields that are set to None in the kwargs of
this method will not be modified. If you set progress information on the job for the first
time and set notify=True, a new notification record for the job progress will be created.
If notify=True, job status changes will also create a notification with type="job_status",
and log changes will create a notification with type="job_log".
:param job: The job document to update.
:param log: Message to append to the job log. If you wish to overwrite
instead of append, pass overwrite=True.
:type log: str
:param overwrite: Whether to overwrite the log (default is append).
:type overwrite: bool
:param status: New status for the job.
:type status: JobStatus
:param progressTotal: Max progress value for this job.
:param otherFields: Any additional fields to set on the job.
:type otherFields: dict
"""
event = events.trigger('jobs.job.update', {
'job': job,
'params': {
'log': log,
'overwrite': overwrite,
'status': status,
'progressTotal': progressTotal,
'progressMessage': progressMessage,
'otherFields': otherFields
}
})
if event.defaultPrevented:
return job
now = datetime.datetime.utcnow()
user = None
otherFields = otherFields or {}
if job['userId']:
user = User().load(job['userId'], force=True)
query = {
'_id': job['_id']
}
updates = {
'$push': {},
'$set': {}
}
statusChanged = False
if log is not None:
self._updateLog(job, log, overwrite, now, notify, user, updates)
if status is not None:
try:
status = int(status)
except ValueError:
# Allow non int states
pass
statusChanged = status != job['status']
self._updateStatus(job, status, now, query, updates)
if progressMessage is not None or progressCurrent is not None or progressTotal is not None:
self._updateProgress(
job, progressTotal, progressCurrent, progressMessage, notify, user, updates)
for k, v in six.viewitems(otherFields):
job[k] = v
updates['$set'][k] = v
if updates['$set'] or updates['$push']:
if not updates['$push']:
del updates['$push']
job['updated'] = now
updates['$set']['updated'] = now
updateResult = self.update(query, update=updates, multi=False)
# If our query didn't match anything then our state transition
# was not valid. So raise an exception
if updateResult.matched_count != 1:
job = self.load(job['_id'], force=True)
msg = 'Invalid state transition to \'%s\', Current state is \'%s\'.' % (
status, job['status'])
raise ValidationException(msg, field='status')
events.trigger('jobs.job.update.after', {
'job': job
})
        # We don't want to do this until we know the update was successful
if statusChanged and user is not None and notify:
self._createUpdateStatusNotification(now, user, job)
return job
def _updateLog(self, job, log, overwrite, now, notify, user, updates):
"""Helper for updating a job's log."""
if overwrite:
updates['$set']['log'] = [log]
else:
updates['$push']['log'] = log
if notify and user:
expires = now + datetime.timedelta(seconds=30)
Notification().createNotification(
type='job_log', data={
'_id': job['_id'],
'overwrite': overwrite,
'text': log
}, user=user, expires=expires)
def _createUpdateStatusNotification(self, now, user, job):
expires = now + datetime.timedelta(seconds=30)
filtered = self.filter(job, user)
filtered.pop('kwargs', None)
filtered.pop('log', None)
Notification().createNotification(
type='job_status', data=filtered, user=user, expires=expires)
def _updateStatus(self, job, status, now, query, updates):
"""Helper for updating job progress information."""
self._validateStatus(status)
if status != job['status']:
job['status'] = status
previous_states = JobStatus.validTransitions(job, status)
if previous_states is None:
# Get the current state
job = self.load(job['_id'], force=True)
msg = 'No valid state transition to \'%s\'. Current state is \'%s\'.' % (
status, job['status'])
raise ValidationException(msg, field='status')
query['status'] = {
'$in': previous_states
}
updates['$set']['status'] = status
ts = {
'status': status,
'time': now
}
job['timestamps'].append(ts)
updates['$push']['timestamps'] = ts
def _updateProgress(self, job, total, current, message, notify, user, updates):
"""Helper for updating job progress information."""
state = JobStatus.toNotificationStatus(job['status'])
if current is not None:
current = float(current)
if total is not None:
total = float(total)
if job['progress'] is None:
if notify and job['userId']:
notification = self._createProgressNotification(
job, total, current, state, message)
notificationId = notification['_id']
else:
notificationId = None
job['progress'] = {
'message': message,
'total': total,
'current': current,
'notificationId': notificationId
}
updates['$set']['progress'] = job['progress']
else:
if total is not None:
job['progress']['total'] = total
updates['$set']['progress.total'] = total
if current is not None:
job['progress']['current'] = current
updates['$set']['progress.current'] = current
if message is not None:
job['progress']['message'] = message
updates['$set']['progress.message'] = message
if notify and user:
if job['progress']['notificationId'] is None:
notification = self._createProgressNotification(
job, total, current, state, message, user)
nid = notification['_id']
job['progress']['notificationId'] = nid
updates['$set']['progress.notificationId'] = nid
else:
notification = Notification().load(job['progress']['notificationId'])
Notification().updateProgress(
notification, state=state,
message=job['progress']['message'],
current=job['progress']['current'],
total=job['progress']['total'])
def _createProgressNotification(self, job, total, current, state, message,
user=None):
if not user:
user = User().load(job['userId'], force=True)
# TODO support channel-based notifications for jobs. For
# right now we'll just go through the user.
return Notification().initProgress(
user, job['title'], total, state=state, current=current,
message=message, estimateTime=False, resource=job, resourceName=self.name)
def filter(self, doc, user=None, additionalKeys=None):
"""
Overrides the parent ``filter`` method to also deserialize the ``kwargs``
field if it is still in serialized form. This is handled in ``load``, but
required here also for fetching lists of jobs.
"""
doc = super(Job, self).filter(doc, user, additionalKeys=additionalKeys)
if 'kwargs' in doc and isinstance(doc['kwargs'], six.string_types):
doc['kwargs'] = json_util.loads(doc['kwargs'])
return doc
def _computeFields(self, kwargs, includeLogDefault=False):
"""
Helper to compute the projection operator for default log exclusion.
"""
fields = kwargs.get('fields')
if fields is None and not kwargs.pop('includeLog', includeLogDefault):
fields = {'log': False}
return fields
def getAllTypesAndStatuses(self, user):
"""
Get a list of types and statuses of all jobs or jobs owned by a particular user.
:param user: The user who owns the jobs.
:type user: dict, or 'all'.
"""
query = {}
if user == 'all':
pass
else:
query['userId'] = user['_id']
types = self.collection.distinct('type', query)
statuses = self.collection.distinct('status', query)
return {'types': types, 'statuses': statuses}
def setParentJob(self, job, parentJob):
"""
Sets a parent job for a job
:param job: Job document which the parent will be set on
:type job: Job
:param parentJob: Parent job
        :type parentJob: Job
"""
self._validateChild(parentJob, job)
return self.updateJob(job, otherFields={'parentId': parentJob['_id']})
def listChildJobs(self, job):
"""
Lists the child jobs for a given job
:param job: Job document
:type job: Job
"""
query = {'parentId': job['_id']}
cursor = self.find(query)
user = User().load(job['userId'], force=True)
for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ):
yield r
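# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the model): create a
# local job, schedule it, then push a status/progress update. The module and
# function names and the `user` document are hypothetical; in Girder this is
# normally driven from a REST resource or another plugin.
# ---------------------------------------------------------------------------
def _exampleRunLocalJob(user):  # pragma: no cover
    jobModel = Job()
    job = jobModel.createLocalJob(
        module='my_plugin.tasks', function='run',
        title='Example job', type='example', user=user,
        kwargs={'answer': 42})
    jobModel.scheduleJob(job)
    return jobModel.updateJob(
        job, status=JobStatus.QUEUED,
        progressTotal=100, progressCurrent=0,
        progressMessage='Queued')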
|
|
from CoreIOCs import *
import pytest
from freezegun import freeze_time
Client.severity = 'INFO'
client = Client({'url': 'https://example.com'})
def d_sort(in_dict):
return sorted(in_dict.items())
class TestGetHeaders:
def test_empty_case(self):
"""
Given:
Empty params
Then:
get_headers will not raise error
"""
get_headers({})
class TestHttpRequest:
def test_http_request_ok(self, requests_mock):
"""
Given:
- a client
When:
- http_request returns status code 200.
Then:
- do not raise an error
"""
requests_mock.post('https://example.com/public_api/v1/indicators/suffix', status_code=200, json={})
client.http_request(url_suffix='suffix', requests_kwargs={})
@pytest.mark.parametrize('status_code', client.error_codes.keys())
def test_http_request_error(self, requests_mock, status_code):
"""
Given:
- Status code
When:
- http_request returns this status code.
Then:
- Verify error message.
- Verify exception.res status code matches the http status code.
"""
with pytest.raises(DemistoException) as e:
requests_mock.post('https://example.com/public_api/v1/indicators/suffix', status_code=status_code)
client.http_request('suffix', requests_kwargs={})
assert e.value.message == client.error_codes[status_code]
assert e.value.res.status_code == status_code
def test_http_request_bad_json(self, requests_mock):
"""
Given:
- a client
When:
- http_request returns a response that is not a json.
Then:
- Verify error message.
- Verify demisto exception
"""
text = 'not a json'
with pytest.raises(DemistoException) as e:
requests_mock.post('https://example.com/public_api/v1/indicators/suffix', status_code=200, text=text)
client.http_request('suffix', requests_kwargs={})
assert e.value.message == f'Could not parse json out of {text}'
assert e.value.res.status_code == 200
assert isinstance(e.value.exception, json.JSONDecodeError)
assert e.value.exception.args == ('Expecting value: line 1 column 1 (char 0)',)
class TestGetRequestsKwargs:
def test_with_file(self, mocker):
"""
Given:
- file to upload
Then:
- Verify output format.
"""
def override_open(open_path, *_other):
return open_path
mocker.patch('builtins.open', side_effect=override_open)
path = '/Users/some_user/some_dir/some_file.file'
output = get_requests_kwargs(file_path=path)
expected_output = {'files': [('file', ('iocs.json', path, 'application/json'))]}
assert output == expected_output, f'get_requests_kwargs(file_path={path})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
def test_with_json(self):
"""
Given:
- simple json
Then:
- the json ready to send
"""
_json = {'test': 'test'}
output = get_requests_kwargs(_json=_json)
expected_output = {'data': '{"request_data": {"test": "test"}}'}
assert output == expected_output, f'get_requests_kwargs(_json={_json})\n\treturns: {output}\n\t instead: {expected_output}' # noqa: E501
class TestPrepareCommands:
def test_prepare_get_changes(self):
"""
Given:
- get changes command
Then:
- Verify url and json format.
"""
ts = int(datetime.now(timezone.utc).timestamp() * 1000)
url_suffix, _json = prepare_get_changes(ts)
assert url_suffix == 'get_changes', f'prepare_get_changes\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: get_changes' # noqa: E501
assert _json == {'last_update_ts': ts}
def test_prepare_enable_iocs(self):
"""
Given:
- enable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_enable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'enable_iocs', f'prepare_enable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: enable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
def test_prepare_disable_iocs(self):
"""
Given:
- disable iocs command
Then:
- Verify url and json format.
"""
url_suffix, iocs = prepare_disable_iocs('8.8.8.8,domain.com')
assert url_suffix == 'disable_iocs', f'prepare_disable_iocs\n\treturns url_suffix: {url_suffix}\n\tinstead url_suffix: disable_iocs' # noqa: E501
assert iocs == ['8.8.8.8', 'domain.com']
class TestCreateFile:
path = 'test_data/sync_file_test.json'
data_test_create_file_sync = [
('Domain_iocs', 'Domain_sync_file'),
('IP_iocs', 'IP_sync_file'),
('File_iocs', 'File_sync_file')
]
data_test_create_file_iocs_to_keep = [
('Domain_iocs', 'Domain_iocs_to_keep_file'),
('IP_iocs', 'IP_iocs_to_keep_file'),
('File_iocs', 'File_iocs_to_keep_file')
]
def setup(self):
# creates the file
with open(TestCreateFile.path, 'w') as _file:
_file.write('')
def teardown(self):
# removes the file when done
os.remove(TestCreateFile.path)
@staticmethod
def get_file(path):
with open(path, 'r') as _file:
return _file.read()
@staticmethod
def get_all_iocs(go_over, extension):
iocs = []
total = 0
data = []
for in_iocs, out_iocs in go_over:
ioc = json.loads(TestCreateFile.get_file(f'test_data/{in_iocs}.json'))
iocs.extend(ioc['iocs'])
total += ioc['total']
data.append(TestCreateFile.get_file(f'test_data/{out_iocs}.{extension}'))
all_iocs = {'iocs': iocs, 'total': total}
all_data = ''.join(data)
return all_iocs, all_data
def test_create_file_sync_without_iocs(self, mocker):
"""
Given:
- Sync command
When:
            - there are no iocs
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_sync with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_sync)
def test_create_file_sync(self, in_iocs, out_iocs, mocker):
"""
Given:
- Sync command
When:
- iocs type is a specific type.
Then:
- Verify sync file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(self.get_file(f'test_data/{in_iocs}.json'))) # noqa: E501
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_sync with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
def test_create_file_sync_all_types(self, mocker):
"""
Given:
- Sync command
When:
            - iocs of all types
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
data_test_create_file_with_empty_indicators = [
{},
{'value': '11.11.11.11'},
{'indicator_type': 'IP'}
]
@pytest.mark.parametrize('defective_indicator', data_test_create_file_with_empty_indicators)
def test_create_file_sync_with_empty_indicators(self, defective_indicator, mocker):
"""
Given:
- Sync command
When:
            - some iocs don't have all the required data
Then:
- Verify sync file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_sync, 'txt')
all_iocs['iocs'].append(defective_indicator)
all_iocs['total'] += 1
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
warnings = mocker.patch.object(demisto, 'debug')
create_file_sync(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_sync with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
error_msg = warnings.call_args.args[0]
assert error_msg.startswith("unexpected IOC format in key: '"), f"create_file_sync empty message\n\tstarts: {error_msg}\n\tinstead: unexpected IOC format in key: '" # noqa: E501
assert error_msg.endswith(f"', {str(defective_indicator)}"), f"create_file_sync empty message\n\tends: {error_msg}\n\tinstead: ', {str(defective_indicator)}" # noqa: E501
def test_create_file_iocs_to_keep_without_iocs(self, mocker):
"""
Given:
- iocs to keep command
When:
            - there are no iocs
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value={})
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = ''
assert data == expected_data, f'create_file_iocs_to_keep with no iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
@pytest.mark.parametrize('in_iocs, out_iocs', data_test_create_file_iocs_to_keep)
def test_create_file_iocs_to_keep(self, in_iocs, out_iocs, mocker):
"""
Given:
- iocs to keep command
When:
- iocs type is a specific type.
Then:
- Verify iocs to keep file data.
"""
mocker.patch.object(demisto, 'searchIndicators', return_value=json.loads(
self.get_file(f'test_data/{in_iocs}.json')))
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
expected_data = self.get_file(f'test_data/{out_iocs}.txt')
assert data == expected_data, f'create_file_iocs_to_keep with {in_iocs} iocs\n\tcreates: {data}\n\tinstead: {expected_data}' # noqa: E501
def test_create_file_iocs_to_keep_all_types(self, mocker):
"""
Given:
- iocs to keep command
When:
            - iocs of all types
Then:
- Verify iocs to keep file data.
"""
all_iocs, expected_data = self.get_all_iocs(self.data_test_create_file_iocs_to_keep, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=all_iocs)
create_file_iocs_to_keep(TestCreateFile.path)
data = self.get_file(TestCreateFile.path)
assert data == expected_data, f'create_file_iocs_to_keep with all iocs\n\tcreates: {data}\n\tinstead: {expected_data}'
class TestDemistoIOCToCore:
data_test_demisto_expiration_to_core = [
(None, -1),
('', -1),
('0001-01-01T00:00:00Z', -1),
('2020-06-03T00:00:00Z', 1591142400000)
]
@pytest.mark.parametrize('demisto_expiration, core_expiration', data_test_demisto_expiration_to_core)
def test_demisto_expiration_to_core(self, demisto_expiration, core_expiration):
"""
Given:
- demisto indicator expiration
Then:
- Verify XDR expiration.
"""
output = demisto_expiration_to_core(demisto_expiration)
assert core_expiration == output, f'demisto_expiration_to_core({demisto_expiration})\n\treturns: {output}\n\tinstead: {core_expiration}' # noqa: E501
data_test_demisto_reliability_to_core = [
(None, 'F'),
('A - Completely reliable', 'A'),
('B - Usually reliable', 'B'),
('C - Fairly reliable', 'C'),
('D - Not usually reliable', 'D'),
('E - Unreliable', 'E'),
('F - Reliability cannot be judged', 'F')
]
@pytest.mark.parametrize('demisto_reliability, core_reliability', data_test_demisto_reliability_to_core)
def test_demisto_reliability_to_core(self, demisto_reliability, core_reliability):
"""
Given:
- demisto indicator reliability
Then:
- Verify XDR reliability.
"""
output = demisto_reliability_to_core(demisto_reliability)
assert output == core_reliability, f'demisto_reliability_to_core({demisto_reliability})\n\treturns: {output}\n\tinstead: {core_reliability}' # noqa: E501
data_test_demisto_types_to_core = [
('File', 'HASH'),
('IP', 'IP'),
('Domain', 'DOMAIN_NAME')
]
@pytest.mark.parametrize('demisto_type, core_type', data_test_demisto_types_to_core)
def test_demisto_types_to_core(self, demisto_type, core_type):
"""
Given:
- demisto indicator type
Then:
- Verify XDR type.
"""
output = demisto_types_to_core(demisto_type)
        assert output == core_type, f'demisto_types_to_core({demisto_type})\n\treturns: {output}\n\tinstead: {core_type}'
data_test_demisto_vendors_to_core = [
(
{'moduleID': {'sourceBrand': 'test', 'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 2}},
{'vendor_name': 'moduleID', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}
),
(
{'moduleID': {'sourceBrand': 'test', 'score': 2}},
{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}
),
(
{'moduleID': {'reliability': 'A - Completely reliable', 'score': 0}},
{'vendor_name': 'moduleID', 'reputation': 'UNKNOWN', 'reliability': 'A'}
)
]
@pytest.mark.parametrize('demisto_vendor, core_vendor', data_test_demisto_vendors_to_core)
def test_demisto_vendors_to_core(self, demisto_vendor, core_vendor):
"""
Given:
- demisto indicator vendors reports.
Then:
- Verify XDR vendors format.
"""
output = demisto_vendors_to_core(demisto_vendor)[0]
assert output == core_vendor, f'demisto_vendors_to_core({demisto_vendor})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(core_vendor)}' # noqa: E501
data_test_demisto_ioc_to_core = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 100, 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO', 'type': '100'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'expiration': '2020-06-03T00:00:00Z'},
{'expiration_date': 1591142400000, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentTimeLine', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP'}
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'test'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'comments': [{'type': 'IndicatorCommentRegular', 'content': 'test'}, {'type': 'IndicatorCommentRegular', 'content': 'this is the comment'}]}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'comment': 'this is the comment'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'aggregatedReliability': 'A - Completely reliable'},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'reliability': 'A'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'CustomFields': {'threattypes': {'threatcategory': 'Malware'}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'class': 'Malware'} # noqa: E501
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'moduleToFeedMap': {'module': {'sourceBrand': 'test', 'score': 2}}}, # noqa: E501
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'UNKNOWN', 'severity': 'INFO', 'type': 'IP', 'vendors': [{'vendor_name': 'test', 'reputation': 'SUSPICIOUS', 'reliability': 'F'}]} # noqa: E501
)
]
@pytest.mark.parametrize('demisto_ioc, core_ioc', data_test_demisto_ioc_to_core)
def test_demisto_ioc_to_core(self, demisto_ioc, core_ioc):
"""
Given:
- demisto indicator.
Then:
- Verify XDR indicator format.
"""
output = demisto_ioc_to_core(demisto_ioc)
assert output == core_ioc, f'demisto_ioc_to_core({demisto_ioc})\n\treturns: {d_sort(output)}\n\tinstead: {d_sort(core_ioc)}' # noqa: E501
def test_empty_demisto_ioc_to_core(self, mocker):
warnings = mocker.patch.object(demisto, 'debug')
output = demisto_ioc_to_core({})
assert output == {}, 'demisto_ioc_to_core({})\n\treturns: ' + str(d_sort(output)) + '\n\tinstead: {}'
assert warnings.call_args.args[0] == "unexpected IOC format in key: 'value', {}"
class TestCoreIOCToDemisto:
data_test_core_expiration_to_demisto = [
(-1, 'Never'),
(1591142400000, '2020-06-03T00:00:00Z'),
(1592142400000, '2020-06-14T13:46:40Z')
]
@pytest.mark.parametrize('core_expiration, demisto_expiration', data_test_core_expiration_to_demisto)
def test_core_expiration_to_demisto(self, core_expiration, demisto_expiration):
"""
Given:
- expiration in XDR format.
Then:
- expiration in demisto format.
"""
output = core_expiration_to_demisto(core_expiration)
assert output == demisto_expiration, f'core_expiration_to_demisto({core_expiration})\n\treturns: {output}\n\tinstead: {demisto_expiration}' # noqa: E501
data_test_core_ioc_to_demisto = [
(
{
'RULE_ID': 863, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801230, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'HASH',
'RULE_INDICATOR': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'fa66f1e0e318b6d7b595b6cee580dc0d8e4ac38fbc8dbfcac6ad66dbe282832e',
'type': 'File',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex Core',
'corestatus': 'disabled'
}
}
),
(
{
'RULE_ID': 861, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'DISABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.com', 'REPUTATION': 'GOOD', # noqa: E501
'RELIABILITY': None, 'VENDORS': None, 'KLASS': None, 'IS_DEFAULT_TTL': False, 'RULE_TTL': -1,
'MARKED_DELETED': 0
},
{
'value': 'test.com',
'type': 'Domain',
'score': 1,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex Core',
'corestatus': 'disabled'
}
}
),
(
{
'RULE_ID': 862, 'RULE_INSERT_TIME': 1591165763753, 'RULE_MODIFY_TIME': 1591166095668,
'RULE_SEVERITY': 'SEV_010_INFO', 'NUMBER_OF_HITS': 0, 'RULE_SOURCE': 'XSOAR TIM', 'RULE_COMMENT': '',
'RULE_STATUS': 'ENABLED', 'BS_STATUS': 'DONE', 'BS_TS': 1591165801784, 'BS_RETRIES': 1,
'RULE_EXPIRATION_TIME': -1, 'IOC_TYPE': 'DOMAIN_NAME', 'RULE_INDICATOR': 'test.co.il',
'REPUTATION': 'SUSPICIOUS', 'RELIABILITY': 'A',
'VENDORS': [{'vendor_name': 'Cortex Core - IOC', 'reputation': 'SUSPICIOUS', 'reliability': 'A'}],
'KLASS': None,
'IS_DEFAULT_TTL': False, 'RULE_TTL': -1, 'MARKED_DELETED': 0
},
{
'value': 'test.co.il',
'type': 'Domain',
'score': 2,
'fields': {
'expirationdate': 'Never',
'tags': 'Cortex Core',
'corestatus': 'enabled'
}
}
)
]
class TestCommands:
# test commands full flow
class TestIOCSCommand:
def test_iocs_command_with_enable(self, mocker):
"""
Given:
- enable command
Then:
- Verify enable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='core-iocs-enable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('CoreIOCs.Client.http_request', return_value={})
outputs = mocker.patch('CoreIOCs.return_outputs')
enable_ioc = mocker.patch('CoreIOCs.prepare_enable_iocs', side_effect=prepare_enable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 enabled.', f'enable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 enabled.' # noqa: E501
assert enable_ioc.call_count == 1, 'enable command not called'
def test_iocs_command_with_disable(self, mocker):
"""
Given:
- disable command
Then:
- Verify disable command is called.
"""
mocker.patch.object(demisto, 'command', return_value='core-iocs-disable')
mocker.patch.object(demisto, 'args', return_value={'indicator': '11.11.11.11'})
mocker.patch('CoreIOCs.Client.http_request', return_value={})
outputs = mocker.patch('CoreIOCs.return_outputs')
disable_ioc = mocker.patch('CoreIOCs.prepare_disable_iocs', side_effect=prepare_disable_iocs)
iocs_command(client)
output = outputs.call_args.args[0]
assert output == 'indicators 11.11.11.11 disabled.', f'disable command\n\tprints: {output}\n\tinstead: indicators 11.11.11.11 disabled.' # noqa: E501
assert disable_ioc.call_count == 1, 'disable command not called'
def test_sync(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('CoreIOCs.return_outputs')
sync(client)
assert http_request.call_args.args[0] == 'sync_tim_iocs', 'sync command url changed'
def test_get_sync_file(self, mocker):
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
return_results_mock = mocker.patch('CoreIOCs.return_results')
get_sync_file()
assert return_results_mock.call_args[0][0]['File'] == 'core-sync-file'
def test_set_sync_time(self, mocker):
        mocker_return_results = mocker.patch('CoreIOCs.return_results')
mocker_set_context = mocker.patch.object(demisto, 'setIntegrationContext')
set_sync_time('2021-11-25T00:00:00')
        mocker_return_results.assert_called_once_with('set sync time to 2021-11-25T00:00:00 seccedded.')
call_args = mocker_set_context.call_args[0][0]
assert call_args['ts'] == 1637798400000
assert call_args['time'] == '2021-11-25T00:00:00Z'
assert call_args['iocs_to_keep_time']
def test_set_sync_time_with_invalid_time(self):
with pytest.raises(ValueError, match='invalid time format.'):
set_sync_time('test')
@freeze_time('2020-06-03T02:00:00Z')
def test_iocs_to_keep(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_iocs_to_keep, 'txt')
        mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('CoreIOCs.return_outputs')
iocs_to_keep(client)
assert http_request.call_args.args[0] == 'iocs_to_keep', 'iocs_to_keep command url changed'
def test_tim_insert_jsons(self, mocker):
http_request = mocker.patch.object(Client, 'http_request')
mocker.patch.object(demisto, 'getIntegrationContext', return_value={'time': '2020-06-03T00:00:00Z'})
iocs, _ = TestCreateFile.get_all_iocs(TestCreateFile.data_test_create_file_sync, 'txt')
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs)
mocker.patch('CoreIOCs.return_outputs')
tim_insert_jsons(client)
assert http_request.call_args.kwargs['url_suffix'] == 'tim_insert_jsons/', 'tim_insert_jsons command url changed'
class TestParams:
tags_test = [
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tlp_color': ''},
'Cortex Core',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'tag': 'tag1'},
'tag1',
None
),
(
{'value': '11.11.11.11', 'indicator_type': 'IP', 'score': 2},
{'expiration_date': -1, 'indicator': '11.11.11.11', 'reputation': 'SUSPICIOUS', 'severity': 'INFO',
'type': 'IP'},
{'feedTags': 'tag2', 'tlp_color': 'AMBER'},
'tag2',
'AMBER'
)
]
def test_file_deleted_for_create_file_sync(mocker):
file_path = 'test'
mocker.patch('CoreIOCs.get_temp_file', return_value=file_path)
open(file_path, 'w').close()
def raise_function(*_args, **_kwargs):
raise DemistoException(file_path)
mocker.patch('CoreIOCs.create_file_sync', new=raise_function)
with pytest.raises(DemistoException):
get_sync_file()
assert os.path.exists(file_path) is False
data_test_test_file_deleted = [
(sync, 'create_file_sync'),
(iocs_to_keep, 'create_file_iocs_to_keep'),
]
@pytest.mark.parametrize('method_to_test,inner_method', data_test_test_file_deleted)
@freeze_time('2020-06-03T02:00:00Z')
def test_file_deleted(mocker, method_to_test, inner_method):
file_path = 'test'
mocker.patch('CoreIOCs.get_temp_file', return_value=file_path)
open(file_path, 'w').close()
def raise_function(*_args, **_kwargs):
raise DemistoException(file_path)
    mocker.patch(f'CoreIOCs.{inner_method}', new=raise_function)
with pytest.raises(DemistoException):
method_to_test(None)
assert os.path.exists(file_path) is False
|
|
# Natural Language Toolkit: Probabilistic Chart Parsers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for associating probabilities with tree
structures that represent the internal organization of a text. The
probabilistic parser module defines C{BottomUpChartParse}.
C{BottomUpChartParse} is an abstract class that implements a
bottom-up chart parser for C{PCFG}s. It maintains a queue of edges,
and adds them to the chart one at a time. The ordering of this queue
is based on the probabilities associated with the edges, allowing the
parser to expand more likely edges before less likely ones. Each
subclass implements a different queue ordering, producing different
search strategies. Currently the following subclasses are defined:
- C{InsideParse} searches edges in decreasing order of
their trees' inside probabilities.
- C{RandomParse} searches edges in random order.
- C{LongestParse} searches edges in decreasing order of their
location's length.
- C{BeamParse} limits the number of edges in the queue, and
searches edges in decreasing order of their trees' inside
probabilities.
"""
##//////////////////////////////////////////////////////
## Bottom-Up PCFG Chart Parser
##//////////////////////////////////////////////////////
# [XX] This might not be implemented quite right -- it would be better
# to associate probabilities with child pointer lists.
from en.parser.nltk_lite.parse.chart import *
from en.parser.nltk_lite.parse.tree import ProbabilisticTree
from en.parser.nltk_lite.parse.cfg import Nonterminal
# Probabilistic edges
class ProbabilisticLeafEdge(LeafEdge):
def prob(self): return 1.0
class ProbabilisticTreeEdge(TreeEdge):
def __init__(self, prob, *args, **kwargs):
self._prob = prob
TreeEdge.__init__(self, *args, **kwargs)
def prob(self): return self._prob
def __cmp__(self, other):
if self._prob != other.prob(): return -1
return TreeEdge.__cmp__(self, other)
def from_production(production, index, p):
return ProbabilisticTreeEdge(p, (index, index), production.lhs(),
production.rhs(), 0)
from_production = staticmethod(from_production)
# Rules using probabilistic edges
class BottomUpInitRule(AbstractChartRule):
NUM_EDGES=0
def apply_iter(self, chart, grammar):
for index in range(chart.num_leaves()):
new_edge = ProbabilisticLeafEdge(chart.leaf(index), index)
if chart.insert(new_edge, ()):
yield new_edge
class BottomUpPredictRule(AbstractChartRule):
NUM_EDGES=1
def apply_iter(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions():
if edge.lhs() == prod.rhs()[0]:
new_edge = ProbabilisticTreeEdge.from_production(prod, edge.start(), prod.prob())
if chart.insert(new_edge, ()):
yield new_edge
class FundamentalRule(AbstractChartRule):
NUM_EDGES=2
def apply_iter(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.end() == right_edge.start() and
left_edge.next() == right_edge.lhs() and
left_edge.is_incomplete() and right_edge.is_complete()):
return
# Construct the new edge.
p = left_edge.prob() * right_edge.prob()
new_edge = ProbabilisticTreeEdge(p,
span=(left_edge.start(), right_edge.end()),
lhs=left_edge.lhs(), rhs=left_edge.rhs(),
dot=left_edge.dot()+1)
# Add it to the chart, with appropriate child pointers.
changed_chart = False
for cpl1 in chart.child_pointer_lists(left_edge):
if chart.insert(new_edge, cpl1+(right_edge,)):
changed_chart = True
# If we changed the chart, then generate the edge.
if changed_chart: yield new_edge
class SingleEdgeFundamentalRule(AbstractChartRule):
NUM_EDGES=1
_fundamental_rule = FundamentalRule()
def apply_iter(self, chart, grammar, edge1):
fr = self._fundamental_rule
if edge1.is_incomplete():
# edge1 = left_edge; edge2 = right_edge
for edge2 in chart.select(start=edge1.end(), is_complete=True,
lhs=edge1.next()):
for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
yield new_edge
else:
# edge2 = left_edge; edge1 = right_edge
for edge2 in chart.select(end=edge1.start(), is_complete=False,
next=edge1.lhs()):
for new_edge in fr.apply_iter(chart, grammar, edge2, edge1):
yield new_edge
def __str__(self): return 'Fundamental Rule'
class BottomUpChartParse(AbstractParse):
"""
An abstract bottom-up parser for C{PCFG}s that uses a C{Chart} to
record partial results. C{BottomUpChartParse} maintains a
queue of edges that can be added to the chart. This queue is
initialized with edges for each token in the text that is being
parsed. C{BottomUpChartParse} inserts these edges into the
chart one at a time, starting with the most likely edges, and
proceeding to less likely edges. For each edge that is added to
the chart, it may become possible to insert additional edges into
the chart; these are added to the queue. This process continues
until enough complete parses have been generated, or until the
queue is empty.
The sorting order for the queue is not specified by
C{BottomUpChartParse}. Different sorting orders will result
in different search strategies. The sorting order for the queue
is defined by the method C{sort_queue}; subclasses are required
to provide a definition for this method.
@type _grammar: C{PCFG}
@ivar _grammar: The grammar used to parse sentences.
@type _trace: C{int}
@ivar _trace: The level of tracing output that should be generated
when parsing a text.
"""
def __init__(self, grammar, trace=0):
"""
Create a new C{BottomUpChartParse}, that uses C{grammar}
to parse texts.
@type grammar: C{PCFG}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
AbstractParse.__init__(self)
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
@type trace: C{int}
@param trace: The trace level. A trace level of C{0} will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
@rtype: C{None}
"""
self._trace = trace
def get_parse_list(self, tokens):
chart = Chart(tokens)
grammar = self._grammar
# Chart parser rules.
bu_init = BottomUpInitRule()
bu = BottomUpPredictRule()
fr = SingleEdgeFundamentalRule()
# Our queue!
queue = []
# Initialize the chart.
for e in bu_init.apply_iter(chart, grammar):
if self._trace>1: chart.pp_edge(e,width=2)
queue.append(e)
while len(queue) > 0:
# Re-sort the queue.
self.sort_queue(queue, chart)
# Get the best edge.
edge = queue.pop()
if self._trace>0:
print ' %-50s prob=%s' % (chart.pp_edge(edge,width=2),
edge.prob())
# Apply BU & FR to it.
queue.extend(bu.apply(chart, grammar, edge))
queue.extend(fr.apply(chart, grammar, edge))
# Get a list of complete parses.
parses = chart.parses(grammar.start(), ProbabilisticTree)
# Assign probabilities to the trees.
prod_probs = {}
for prod in grammar.productions():
prod_probs[prod.lhs(), prod.rhs()] = prod.prob()
for parse in parses:
self._setprob(parse, prod_probs)
# Sort by probability
parses.sort(lambda a,b: cmp(b.prob(), a.prob()))
return parses
def _setprob(self, tree, prod_probs):
if tree.prob() is not None: return
# Get the prob of the CFG production.
lhs = Nonterminal(tree.node)
rhs = []
for child in tree:
if isinstance(child, Tree):
rhs.append(Nonterminal(child.node))
else:
rhs.append(child)
prob = prod_probs[lhs, tuple(rhs)]
# Get the probs of children.
for child in tree:
if isinstance(child, Tree):
self._setprob(child, prod_probs)
prob *= child.prob()
tree.set_prob(prob)
def sort_queue(self, queue, chart):
"""
Sort the given queue of C{Edge}s, placing the edge that should
be tried first at the beginning of the queue. This method
will be called after each C{Edge} is added to the queue.
@param queue: The queue of C{Edge}s to sort. Each edge in
this queue is an edge that could be added to the chart by
the fundamental rule; but that has not yet been added.
@type queue: C{list} of C{Edge}
@param chart: The chart being used to parse the text. This
chart can be used to provide extra information for sorting
the queue.
@type chart: C{Chart}
@rtype: C{None}
"""
raise AssertionError, "BottomUpChartParse is an abstract class"
class InsideParse(BottomUpChartParse):
"""
A bottom-up parser for C{PCFG}s that tries edges in descending
order of the inside probabilities of their trees. The X{inside
probability} of a tree is simply the
probability of the entire tree, ignoring its context. In
particular, the inside probability of a tree generated by
production M{p} with children M{c[1]}, M{c[2]}, ..., M{c[n]} is
P(M{p})*P(M{c[1]})*P(M{c[2]})*M{...}*P(M{c[n]}); and the inside
probability of a token is 1 if it is present in the text, and 0 if
it is absent.
This sorting order results in a type of lowest-cost-first search
strategy.
"""
# Inherit constructor.
def sort_queue(self, queue, chart):
"""
Sort the given queue of edges, in descending order of the
inside probabilities of the edges' trees.
@param queue: The queue of C{Edge}s to sort. Each edge in
this queue is an edge that could be added to the chart by
the fundamental rule; but that has not yet been added.
@type queue: C{list} of C{Edge}
@param chart: The chart being used to parse the text. This
chart can be used to provide extra information for sorting
the queue.
@type chart: C{Chart}
@rtype: C{None}
"""
queue.sort(lambda e1,e2:cmp(e1.prob(), e2.prob()))
# Eventually, this will become some sort of inside-outside parser:
# class InsideOutsideParse(BottomUpChartParse):
# def __init__(self, grammar, trace=0):
# # Inherit docs.
# BottomUpChartParse.__init__(self, grammar, trace)
#
# # Find the best path from S to each nonterminal
# bestp = {}
# for production in grammar.productions(): bestp[production.lhs()]=0
# bestp[grammar.start()] = 1.0
#
# for i in range(len(grammar.productions())):
# for production in grammar.productions():
# lhs = production.lhs()
# for elt in production.rhs():
# bestp[elt] = max(bestp[lhs]*production.prob(),
# bestp.get(elt,0))
#
# self._bestp = bestp
# for (k,v) in self._bestp.items(): print k,v
#
# def _cmp(self, e1, e2):
# return cmp(e1.structure()[PROB]*self._bestp[e1.lhs()],
# e2.structure()[PROB]*self._bestp[e2.lhs()])
#
# def sort_queue(self, queue, chart):
# queue.sort(self._cmp)
import random
class RandomParse(BottomUpChartParse):
"""
A bottom-up parser for C{PCFG}s that tries edges in random order.
This sorting order results in a random search strategy.
"""
# Inherit constructor
def sort_queue(self, queue, chart):
i = random.randint(0, len(queue)-1)
(queue[-1], queue[i]) = (queue[i], queue[-1])
class UnsortedParse(BottomUpChartParse):
"""
    A bottom-up parser for C{PCFG}s that tries edges in whatever order they
    happen to be in the queue; no sorting is performed.
"""
# Inherit constructor
def sort_queue(self, queue, chart): return
class LongestParse(BottomUpChartParse):
"""
A bottom-up parser for C{PCFG}s that tries longer edges before
shorter ones. This sorting order results in a type of best-first
search strategy.
"""
# Inherit constructor
def sort_queue(self, queue, chart):
queue.sort(lambda e1,e2: cmp(e1.length(), e2.length()))
class BeamParse(BottomUpChartParse):
"""
A bottom-up parser for C{PCFG}s that limits the number of edges in
its edge queue.
"""
def __init__(self, beam_size, grammar, trace=0):
"""
Create a new C{BottomUpChartParse}, that uses C{grammar}
to parse texts.
@type beam_size: C{int}
@param beam_size: The maximum length for the parser's edge queue.
@type grammar: C{pcfg.Grammar}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
BottomUpChartParse.__init__(self, grammar, trace)
self._beam_size = beam_size
def sort_queue(self, queue, chart):
queue.sort(lambda e1,e2:cmp(e1.prob(), e2.prob()))
if len(queue) > self._beam_size:
split = len(queue)-self._beam_size
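            # e.g. with beam_size=3 and 5 queued edges, split=2 and the two
            # lowest-probability edges (queue[:2]) are discarded below.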
if self._trace > 2:
for edge in queue[:split]:
print ' %-50s [DISCARDED]' % chart.pp_edge(edge,2)
queue[:] = queue[split:]
##//////////////////////////////////////////////////////
## Test Code
##//////////////////////////////////////////////////////
def demo():
"""
A demonstration of the probabilistic parsers. The user is
prompted to select which demo to run, and how many parses should
be found; and then each parser is run on the same demo, and a
    summary of the results is displayed.
"""
import sys, time
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse import cfg, pcfg, pchart
# Define two demos. Each demo has a sentence and a grammar.
demos = [('I saw John with my cookie', pcfg.toy1),
('the boy saw Jack with Bob under the table with a telescope',
pcfg.toy2)]
# Ask the user which demo they want to use.
print
for i in range(len(demos)):
print '%3s: %s' % (i+1, demos[i][0])
print ' %r' % demos[i][1]
print
print 'Which demo (%d-%d)? ' % (1, len(demos)),
try:
snum = int(sys.stdin.readline().strip())-1
sent, grammar = demos[snum]
except:
print 'Bad sentence number'
return
# Tokenize the sentence.
tokens = list(tokenize.whitespace(sent))
# Define a list of parsers. We'll use all parsers.
parsers = [
pchart.InsideParse(grammar),
pchart.RandomParse(grammar),
pchart.UnsortedParse(grammar),
pchart.LongestParse(grammar),
pchart.BeamParse(len(tokens)+1, grammar)
]
# Run the parsers on the tokenized sentence.
times = []
average_p = []
num_parses = []
all_parses = {}
for parser in parsers:
print '\ns: %s\nparser: %s\ngrammar: %s' % (sent,parser,pcfg)
parser.trace(3)
t = time.time()
parses = parser.get_parse_list(tokens)
times.append(time.time()-t)
if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else: p = 0
average_p.append(p)
num_parses.append(len(parses))
for p in parses: all_parses[p.freeze()] = 1
# Print some summary statistics
print
print ' Parser | Time (secs) # Parses Average P(parse)'
print '-------------------+------------------------------------------'
for i in range(len(parsers)):
print '%18s |%11.4f%11d%19.14f' % (parsers[i].__class__.__name__,
times[i],num_parses[i],average_p[i])
parses = all_parses.keys()
if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else: p = 0
print '-------------------+------------------------------------------'
print '%18s |%11s%11d%19.14f' % ('(All Parses)', 'n/a', len(parses), p)
# Ask the user if we should draw the parses.
print
print 'Draw parses (y/n)? ',
if sys.stdin.readline().strip().lower().startswith('y'):
from en.parser.nltk_lite.draw.tree import draw_trees
print ' please wait...'
draw_trees(*parses)
# Ask the user if we should print the parses.
print
print 'Print parses (y/n)? ',
if sys.stdin.readline().strip().lower().startswith('y'):
for parse in parses:
print parse
if __name__ == '__main__':
demo()
|
|
"""Tests for sysconfig."""
import unittest
import sys
import os
import shutil
import subprocess
from copy import copy, deepcopy
from test.test_support import run_unittest, TESTFN, unlink, get_attribute
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var)
class TestSysConfig(unittest.TestCase):
def setUp(self):
"""Make a copy of sys.path"""
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
self.makefile = None
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._uname = None
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = copy(sysconfig._CONFIG_VARS)
self.old_environ = deepcopy(os.environ)
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
if self.makefile is not None:
os.unlink(self.makefile)
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = copy(self._config_vars)
for key, value in self.old_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
for key in os.environ.keys():
if key not in self.old_environ:
del os.environ[key]
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = uname
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = wanted.items()
wanted.sort()
scheme = scheme.items()
scheme.sort()
self.assertEqual(scheme, wanted)
def test_get_path(self):
# xxx make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxint
try:
sys.maxint = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxint = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxint = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxint
try:
sys.maxint = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxint = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxint = maxint
# macbook with fat binaries (fat, universal or fat64)
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'ppc64', 'x86_64'):
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3'%(arch,))
self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))
# macosx with ARCHFLAGS set and empty _CONFIG_VARS
os.environ['ARCHFLAGS'] = '-arch i386'
sysconfig._CONFIG_VARS = None
# this will attempt to recreate the _CONFIG_VARS based on environment
        # variables; used to check a problem with PyPy's _init_posix
# implementation; see: issue 705
get_config_vars()
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user', 'pypy')
self.assertEqual(get_scheme_names(), wanted)
def test_symlink(self):
# Issue 7880
symlink = get_attribute(os, "symlink")
def get(python):
cmd = [python, '-c',
'import sysconfig; print sysconfig.get_platform()']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
adapt = sys.prefix != sys.exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.prefix)
base = base.replace(sys.exec_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
with open('/dev/null', 'w') as devnull_fp:
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=devnull_fp,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=open('/dev/null'),
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
def test_main():
run_unittest(TestSysConfig)
if __name__ == "__main__":
test_main()
|
|
from __future__ import unicode_literals
from jinja2 import Template
from six.moves.urllib.parse import parse_qs, urlparse
from moto.core.responses import BaseResponse
from .models import route53_backend
import xmltodict
class Route53(BaseResponse):
def list_or_create_hostzone_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "POST":
elements = xmltodict.parse(self.body)
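            # Rough shape of the parsed body (a sketch inferred from the keys
            # accessed below, not an exhaustive schema):
            #   {"CreateHostedZoneRequest": {
            #       "Name": "example.com",
            #       "HostedZoneConfig": {"Comment": "...", "PrivateZone": "true"},
            #       "VPC": {...}}}   # VPC element present only for private zones (boto)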
if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]:
comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"][
"Comment"
]
try:
# in boto3, this field is set directly in the xml
private_zone = elements["CreateHostedZoneRequest"][
"HostedZoneConfig"
]["PrivateZone"]
except KeyError:
                    # a VPC subsection is included in the xml params only when private_zone=True;
                    # see boto: boto/route53/connection.py
private_zone = "VPC" in elements["CreateHostedZoneRequest"]
else:
comment = None
private_zone = False
name = elements["CreateHostedZoneRequest"]["Name"]
if name[-1] != ".":
name += "."
new_zone = route53_backend.create_hosted_zone(
name, comment=comment, private_zone=private_zone
)
template = Template(CREATE_HOSTED_ZONE_RESPONSE)
return 201, headers, template.render(zone=new_zone)
elif request.method == "GET":
all_zones = route53_backend.get_all_hosted_zones()
template = Template(LIST_HOSTED_ZONES_RESPONSE)
return 200, headers, template.render(zones=all_zones)
def list_hosted_zones_by_name_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
query_params = parse_qs(parsed_url.query)
dnsname = query_params.get("dnsname")
if dnsname:
dnsname = dnsname[0]
if dnsname[-1] != ".":
dnsname += "."
zones = [
zone
for zone in route53_backend.get_all_hosted_zones()
if zone.name == dnsname
]
else:
# sort by names, but with domain components reversed
# see http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones_by_name
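            # e.g. "www.example.com." is keyed as "com.example.www." so zones
            # group by top-level domain first (zone names carry a trailing dot)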
def sort_key(zone):
domains = zone.name.split(".")
if domains[-1] == "":
domains = domains[-1:] + domains[:-1]
return ".".join(reversed(domains))
zones = route53_backend.get_all_hosted_zones()
zones = sorted(zones, key=sort_key)
template = Template(LIST_HOSTED_ZONES_BY_NAME_RESPONSE)
return 200, headers, template.render(zones=zones, dnsname=dnsname)
def get_or_delete_hostzone_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
zoneid = parsed_url.path.rstrip("/").rsplit("/", 1)[1]
the_zone = route53_backend.get_hosted_zone(zoneid)
if not the_zone:
return 404, headers, "Zone %s not Found" % zoneid
if request.method == "GET":
template = Template(GET_HOSTED_ZONE_RESPONSE)
return 200, headers, template.render(zone=the_zone)
elif request.method == "DELETE":
route53_backend.delete_hosted_zone(zoneid)
return 200, headers, DELETE_HOSTED_ZONE_RESPONSE
def rrset_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
method = request.method
zoneid = parsed_url.path.rstrip("/").rsplit("/", 2)[1]
the_zone = route53_backend.get_hosted_zone(zoneid)
if not the_zone:
return 404, headers, "Zone %s Not Found" % zoneid
if method == "POST":
elements = xmltodict.parse(self.body)
change_list = elements["ChangeResourceRecordSetsRequest"]["ChangeBatch"][
"Changes"
]["Change"]
if not isinstance(change_list, list):
change_list = [
elements["ChangeResourceRecordSetsRequest"]["ChangeBatch"][
"Changes"
]["Change"]
]
for value in change_list:
action = value["Action"]
record_set = value["ResourceRecordSet"]
cleaned_record_name = record_set["Name"].strip(".")
cleaned_hosted_zone_name = the_zone.name.strip(".")
if not cleaned_record_name.endswith(cleaned_hosted_zone_name):
error_msg = """
An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
RRSet with DNS name %s is not permitted in zone %s
""" % (
record_set["Name"],
the_zone.name,
)
return 400, headers, error_msg
if not record_set["Name"].endswith("."):
record_set["Name"] += "."
if action in ("CREATE", "UPSERT"):
if "ResourceRecords" in record_set:
resource_records = list(record_set["ResourceRecords"].values())[
0
]
if not isinstance(resource_records, list):
# Depending on how many records there are, this may
# or may not be a list
resource_records = [resource_records]
record_set["ResourceRecords"] = [
x["Value"] for x in resource_records
]
if action == "CREATE":
the_zone.add_rrset(record_set)
else:
the_zone.upsert_rrset(record_set)
elif action == "DELETE":
if "SetIdentifier" in record_set:
the_zone.delete_rrset_by_id(record_set["SetIdentifier"])
else:
the_zone.delete_rrset(record_set)
return 200, headers, CHANGE_RRSET_RESPONSE
elif method == "GET":
querystring = parse_qs(parsed_url.query)
template = Template(LIST_RRSET_RESPONSE)
start_type = querystring.get("type", [None])[0]
start_name = querystring.get("name", [None])[0]
if start_type and not start_name:
return 400, headers, "The input is not valid"
record_sets = the_zone.get_record_sets(start_type, start_name)
return 200, headers, template.render(record_sets=record_sets)
def health_check_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
method = request.method
if method == "POST":
properties = xmltodict.parse(self.body)["CreateHealthCheckRequest"][
"HealthCheckConfig"
]
health_check_args = {
"ip_address": properties.get("IPAddress"),
"port": properties.get("Port"),
"type": properties["Type"],
"resource_path": properties.get("ResourcePath"),
"fqdn": properties.get("FullyQualifiedDomainName"),
"search_string": properties.get("SearchString"),
"request_interval": properties.get("RequestInterval"),
"failure_threshold": properties.get("FailureThreshold"),
}
health_check = route53_backend.create_health_check(health_check_args)
template = Template(CREATE_HEALTH_CHECK_RESPONSE)
return 201, headers, template.render(health_check=health_check)
elif method == "DELETE":
health_check_id = parsed_url.path.split("/")[-1]
route53_backend.delete_health_check(health_check_id)
return 200, headers, DELETE_HEALTH_CHECK_RESPONSE
elif method == "GET":
template = Template(LIST_HEALTH_CHECKS_RESPONSE)
health_checks = route53_backend.get_health_checks()
return 200, headers, template.render(health_checks=health_checks)
def not_implemented_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
action = ""
if "tags" in full_url:
action = "tags"
elif "trafficpolicyinstances" in full_url:
action = "policies"
raise NotImplementedError(
"The action for {0} has not been implemented for route 53".format(action)
)
def list_or_change_tags_for_resource_request(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
id_ = parsed_url.path.split("/")[-1]
type_ = parsed_url.path.split("/")[-2]
if request.method == "GET":
tags = route53_backend.list_tags_for_resource(id_)
template = Template(LIST_TAGS_FOR_RESOURCE_RESPONSE)
return (
200,
headers,
template.render(resource_type=type_, resource_id=id_, tags=tags),
)
if request.method == "POST":
tags = xmltodict.parse(self.body)["ChangeTagsForResourceRequest"]
if "AddTags" in tags:
tags = tags["AddTags"]
elif "RemoveTagKeys" in tags:
tags = tags["RemoveTagKeys"]
route53_backend.change_tags_for_resource(id_, tags)
template = Template(CHANGE_TAGS_FOR_RESOURCE_RESPONSE)
return 200, headers, template.render()
def get_change(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "GET":
parsed_url = urlparse(full_url)
change_id = parsed_url.path.rstrip("/").rsplit("/", 1)[1]
template = Template(GET_CHANGE_RESPONSE)
return 200, headers, template.render(change_id=change_id)
LIST_TAGS_FOR_RESOURCE_RESPONSE = """
<ListTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/">
<ResourceTagSet>
<ResourceType>{{resource_type}}</ResourceType>
<ResourceId>{{resource_id}}</ResourceId>
<Tags>
{% for key, value in tags.items() %}
<Tag>
<Key>{{key}}</Key>
<Value>{{value}}</Value>
</Tag>
{% endfor %}
</Tags>
</ResourceTagSet>
</ListTagsForResourceResponse>
"""
CHANGE_TAGS_FOR_RESOURCE_RESPONSE = """<ChangeTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/">
</ChangeTagsForResourceResponse>
"""
LIST_RRSET_RESPONSE = """<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ResourceRecordSets>
{% for record_set in record_sets %}
{{ record_set.to_xml() }}
{% endfor %}
</ResourceRecordSets>
<IsTruncated>false</IsTruncated>
</ListResourceRecordSetsResponse>"""
CHANGE_RRSET_RESPONSE = """<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>/change/C2682N5HXP0BZ4</Id>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>"""
DELETE_HOSTED_ZONE_RESPONSE = """<DeleteHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
</ChangeInfo>
</DeleteHostedZoneResponse>"""
GET_HOSTED_ZONE_RESPONSE = """<GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</GetHostedZoneResponse>"""
CREATE_HOSTED_ZONE_RESPONSE = """<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>0</ResourceRecordSetCount>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</CreateHostedZoneResponse>"""
LIST_HOSTED_ZONES_RESPONSE = """<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
<IsTruncated>false</IsTruncated>
</ListHostedZonesResponse>"""
LIST_HOSTED_ZONES_BY_NAME_RESPONSE = """<ListHostedZonesByNameResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{% if dnsname %}
<DNSName>{{ dnsname }}</DNSName>
{% endif %}
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
<IsTruncated>false</IsTruncated>
</ListHostedZonesByNameResponse>"""
CREATE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{{ health_check.to_xml() }}
</CreateHealthCheckResponse>"""
LIST_HEALTH_CHECKS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListHealthChecksResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<HealthChecks>
{% for health_check in health_checks %}
{{ health_check.to_xml() }}
{% endfor %}
</HealthChecks>
<IsTruncated>false</IsTruncated>
<MaxItems>{{ health_checks|length }}</MaxItems>
</ListHealthChecksResponse>"""
DELETE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
</DeleteHealthCheckResponse>"""
GET_CHANGE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<GetChangeResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>{{ change_id }}</Id>
</ChangeInfo>
</GetChangeResponse>"""
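# Illustrative sketch, not part of the original module: the response bodies above
# are plain Jinja2 template strings, so they can be rendered directly when
# debugging. This assumes `Template` is the jinja2 Template class this module
# already uses in its handlers.
def _render_get_change_example(change_id="/change/C2682N5HXP0BZ4"):
    """Render GET_CHANGE_RESPONSE for a made-up change id (example only)."""
    return Template(GET_CHANGE_RESPONSE).render(change_id=change_id)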
|
|
import json
from django import http
from django.db import IntegrityError
from django.db.models import F
from django.forms.models import modelformset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
import commonware.log
import waffle
import amo
import amo.utils
import api.utils
import api.views
from amo.decorators import post_required
from amo.models import manual_order
from amo.urlresolvers import reverse
from addons.decorators import addon_view_factory
from addons.models import Addon
from addons.utils import get_featured_ids
from browse.views import personas_listing
from bandwagon.models import Collection, SyncedCollection
from discovery.modules import PromoVideoCollection
from reviews.models import Review
from stats.models import GlobalStat
from versions.compare import version_int
from zadmin.decorators import admin_required
from .models import DiscoveryModule
from .forms import DiscoveryModuleForm
from .modules import registry as module_registry
addon_view = addon_view_factory(Addon.objects.valid)
log = commonware.log.getLogger('z.disco')
def get_compat_mode(version):
# Returns appropriate compat mode based on app version.
# Replace when we are ready to deal with bug 711698.
vint = version_int(version)
return 'ignore' if vint >= version_int('10.0') else 'strict'
def pane(request, version, platform, compat_mode=None):
if not compat_mode:
compat_mode = get_compat_mode(version)
def from_api(list_type):
return api_view(request, platform, version, list_type,
compat_mode=compat_mode)
promovideo = PromoVideoCollection().get_items()
return render(request, 'discovery/pane.html',
{'up_and_coming': from_api('hotness'),
'featured_addons': from_api('featured'),
'featured_personas': get_featured_personas(request),
'version': version, 'platform': platform,
'promovideo': promovideo, 'compat_mode': compat_mode})
def pane_account(request):
try:
qs = GlobalStat.objects.filter(name='addon_total_downloads')
addon_downloads = qs.latest().count
except GlobalStat.DoesNotExist:
addon_downloads = None
return render(request, 'discovery/pane_account.html',
{'addon_downloads': addon_downloads})
def promos(request, context, version, platform, compat_mode='strict'):
platform = amo.PLATFORM_DICT.get(platform.lower(), amo.PLATFORM_ALL)
modules = get_modules(request, platform.api_name, version)
return render(request, 'addons/impala/homepage_promos.html',
{'modules': modules, 'module_context': context})
def pane_promos(request, version, platform, compat_mode=None):
if not compat_mode:
compat_mode = get_compat_mode(version)
return promos(request, 'discovery', version, platform, compat_mode)
def pane_more_addons(request, section, version, platform, compat_mode=None):
if not compat_mode:
compat_mode = get_compat_mode(version)
def from_api(list_type):
return api_view(request, platform, version, list_type,
compat_mode=compat_mode)
ctx = {}
if section == 'featured':
ctx = {'featured_addons': from_api('featured')}
elif section == 'up-and-coming':
ctx = {'up_and_coming': from_api('hotness')}
return render(request, 'discovery/more_addons.html', ctx)
def get_modules(request, platform, version):
lang = request.LANG
qs = DiscoveryModule.objects.filter(app=request.APP.id)
# Remove any modules without a registered backend or an ordering.
modules = [m for m in qs
if m.module in module_registry and m.ordering is not None]
# Remove modules that specify a locales string we're not part of.
modules = [m for m in modules
if not m.locales or lang in m.locales.split()]
modules = sorted(modules, key=lambda x: x.ordering)
return [module_registry[m.module](request, platform, version)
for m in modules]
def get_featured_personas(request, category=None, num_personas=6):
categories, filter, base, category = personas_listing(request, category)
ids = get_featured_ids(request.APP, request.LANG, type=amo.ADDON_PERSONA)
return manual_order(base, ids, 'addons.id')[:num_personas]
def api_view(request, platform, version, list_type, api_version=1.5,
format='json', mimetype='application/json', compat_mode='strict'):
"""Wrapper for calling an API view."""
view = api.views.ListView()
view.request, view.version = request, api_version
view.format, view.mimetype = format, mimetype
r = view.process_request(list_type, platform=platform, version=version,
compat_mode=compat_mode)
return json.loads(r.content)
@admin_required
def module_admin(request):
APP = request.APP
# Custom sorting to drop ordering=NULL objects to the bottom.
with amo.models.skip_cache():
qs = DiscoveryModule.objects.raw("""
SELECT * from discovery_modules WHERE app_id = %s
ORDER BY ordering IS NULL, ordering""", [APP.id])
qs.ordered = True # The formset looks for this.
_sync_db_and_registry(qs, APP.id)
Form = modelformset_factory(DiscoveryModule, form=DiscoveryModuleForm,
can_delete=True, extra=0)
formset = Form(request.POST or None, queryset=qs)
if request.method == 'POST' and formset.is_valid():
formset.save()
return redirect('discovery.module_admin')
return render(request, 'discovery/module_admin.html', {'formset': formset})
def _sync_db_and_registry(qs, app_id):
"""Match up the module registry and DiscoveryModule rows in the db."""
existing = dict((m.module, m) for m in qs)
to_add = [m for m in module_registry if m not in existing]
to_delete = [m for m in existing if m not in module_registry]
for m in to_add:
DiscoveryModule.objects.get_or_create(module=m, app=app_id)
DiscoveryModule.objects.filter(module__in=to_delete, app=app_id).delete()
if to_add or to_delete:
qs._result_cache = None
@csrf_exempt
@post_required
def recommendations(request, version, platform, limit=9, compat_mode=None):
"""
Figure out recommended add-ons for an anonymous user based on POSTed guids.
POST body looks like {"guids": [...]} with an optional "token2" key if
they've been here before.
"""
if not compat_mode:
compat_mode = get_compat_mode(version)
try:
POST = json.loads(request.body)
guids = POST['guids']
except (ValueError, TypeError, KeyError) as e:
# Errors: invalid json, didn't get a dict, didn't find "guids".
log.debug('Recommendations return 400 because: %s' % e)
return http.HttpResponseBadRequest()
addon_ids = get_addon_ids(guids)
index = Collection.make_index(addon_ids)
ids, recs = Collection.get_recs_from_ids(addon_ids, request.APP, version,
compat_mode)
recs = _recommendations(request, version, platform, limit, index, ids,
recs, compat_mode)
# We're only storing a percentage of the collections we see because the db
# can't keep up with 100%.
if not waffle.sample_is_active('disco-pane-store-collections'):
return recs
# Users have a token2 if they've been here before. The token matches
# addon_index in their SyncedCollection.
if 'token2' in POST:
token = POST['token2']
if token == index:
# We've seen them before and their add-ons have not changed.
return recs
elif token != index:
# We've seen them before and their add-ons changed. Remove the
# reference to their old synced collection.
(SyncedCollection.objects.filter(addon_index=token)
.update(count=F('count') - 1))
# Try to create the SyncedCollection. There's a unique constraint on
# addon_index so it will fail if this addon_index already exists. If we
# checked for existence first and then created a collection there would
# be a race condition between multiple users with the same addon_index.
try:
c = SyncedCollection.objects.create(addon_index=index, count=1)
c.set_addons(addon_ids)
except IntegrityError:
try:
(SyncedCollection.objects.filter(addon_index=index)
.update(count=F('count') + 1))
except Exception as e:
log.error(u'Could not count++ "%s" (%s).' % (index, e))
return recs
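# Illustrative request/response sketch for the view above (not part of the
# original module). The URL path and guids below are made up; the view only
# requires a POSTed JSON body with a "guids" list and, optionally, a "token2"
# value returned on a previous visit:
#
#   POST <recommendations URL>/10.0/WINNT
#   {"guids": ["addon-one@example.com", "addon-two@example.com"],
#    "token2": "<addon_index hash from a previous visit>"}
#
# The JSON response is built by _recommendations() below and has the shape
# {"token2": ..., "addons": [...]}.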
def _recommendations(request, version, platform, limit, token, ids, qs,
compat_mode='strict'):
"""Return a JSON response for the recs view."""
addons = api.views.addon_filter(qs, 'ALL', 0, request.APP, platform,
version, compat_mode, shuffle=False)
addons = dict((a.id, a) for a in addons)
addons = [api.utils.addon_to_dict(addons[i], disco=True,
src='discovery-personalrec')
for i in ids if i in addons][:limit]
data = {'token2': token, 'addons': addons}
content = json.dumps(data, cls=amo.utils.JSONEncoder)
return http.HttpResponse(content, content_type='application/json')
def get_addon_ids(guids):
return list(Addon.objects.filter(guid__in=guids)
.values_list('id', flat=True))
@addon_view
def addon_detail(request, addon):
reviews = Review.objects.valid().filter(addon=addon, is_latest=True)
src = request.GET.get('src', 'discovery-details')
return render(request, 'discovery/addons/detail.html',
{'addon': addon, 'reviews': reviews,
'get_replies': Review.get_replies, 'src': src})
@addon_view
def addon_eula(request, addon, file_id):
if not addon.eula:
return http.HttpResponseRedirect(reverse('discovery.addons.detail',
args=[addon.slug]))
if file_id is not None:
version = get_object_or_404(addon.versions, files__id=file_id)
else:
version = addon.current_version
src = request.GET.get('src', 'discovery-details')
return render(request, 'discovery/addons/eula.html',
{'addon': addon, 'version': version, 'src': src})
def recs_transform(recs):
ids = [r.addon_id for r in recs] + [r.other_addon_id for r in recs]
addons = dict((a.id, a) for a in Addon.objects.filter(id__in=ids))
for r in recs:
r.addon = addons[r.addon_id]
r.other_addon = addons[r.other_addon_id]
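# Illustrative usage sketch (not part of the original module): recs_transform()
# is meant to be called on recommendation rows that carry addon_id and
# other_addon_id attributes; afterwards each row exposes .addon and
# .other_addon. The queryset name below is hypothetical.
#
#   recs = list(AddonRecommendation.objects.filter(addon__in=addon_ids))
#   recs_transform(recs)
#   pairs = [(r.addon, r.other_addon) for r in recs]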
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Centralize knowledge about how to create standardized Google Storage paths.
This includes definitions for various build flags:
SKIP - means a given build is bad and should not have payloads generated.
FINISHED - means that the payloads have been fully generated.
LOCK - means that payload processing is in progress on the host which
owns the locks. Locks have a timeout associated with them in
case of error, but are not 100% atomic when a lock is timing out.
Example file paths:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/SKIP_flag
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/FINISHED_flag
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/LOCK_flag
"""
from __future__ import print_function
import hashlib
import os
import random
import re
from chromite.lib.paygen import utils
class Build(utils.RestrictedAttrDict):
"""Define a ChromeOS Build.
The order of attributes in self._slots dictates the order attributes
are printed in by __str__ method of super class. Keep the attributes
that are more helpful in identifying this build earlier in the list,
because this string ends up cut off in email subjects.
Fields:
board: The board of the image "x86-mario", etc.
bucket: The bucket of the image. "chromeos-releases" as default.
channel: The channel of the image "stable-channel", "nplusone", etc.
uri: The URI of the build directory.
version: The version of the image. "0.14.23.2", "3401.0.0", etc.
"""
_slots = ('board', 'version', 'channel', 'bucket', 'uri')
_name = 'Build definition'
def __init__(self, *args, **kwargs):
super(Build, self).__init__(*args, **kwargs)
# If these match defaults, set to None.
self._clear_if_default('bucket', ChromeosReleases.BUCKET)
class Image(utils.RestrictedAttrDict):
"""Define a ChromeOS Image.
Fields:
board: The board of the image "x86-mario", etc.
bucket: The bucket of the image. "chromeos-releases" as default.
channel: The channel of the image "stable-channel", "nplusone", etc.
image_channel: Sometimes an image has a different channel than the build
directory it's in. (ie: nplusone). None otherwise.
image_version: Sometimes an image has a different version than the build
directory it's in. (ie: nplusone). None otherwise.
key: The key the image was signed with. "premp", "mp", "mp-v2"
This is not the board specific key name, but the general value used
in image/payload names.
uri: The URI of the image. This URI can be any format understood by
urilib.
version: The version of the image. "0.14.23.2", "3401.0.0", etc.
"""
_name = 'Image definition'
_slots = ('board', 'version', 'channel', 'key',
'image_channel', 'image_version', 'bucket',
'uri')
def __init__(self, *args, **kwargs):
super(Image, self).__init__(*args, **kwargs)
# If these match defaults, set to None.
self._clear_if_default('bucket', ChromeosReleases.BUCKET)
self._clear_if_default('image_channel', self['channel'])
self._clear_if_default('image_version', self['version'])
def __str__(self):
if self.uri:
return '%s' % self.uri.split('/')[-1]
else:
return ('Image: %s:%s/%s%s/%s%s/%s (no uri)' %
(self.bucket, self.board, self.channel,
'(%s)' % self.image_channel if self.image_channel else '',
self.version,
'(%s)' % self.image_version if self.image_version else '',
self.key))
class UnsignedImageArchive(utils.RestrictedAttrDict):
"""Define a unsigned ChromeOS image archive.
Fields:
bucket: The bucket of the image. "chromeos-releases" as default.
channel: The channel of the image "stable-channel", "nplusone", etc.
board: The board of the image "x86-mario", etc.
version: The version of the image. "0.14.23.2", "3401.0.0", etc.
milestone: the most recent branch corresponding to the version; "R19" etc
image_type: "test" or "recovery"
uri: The URI of the image. This URI can be any format understood by
urilib.
"""
_name = 'Unsigned image archive definition'
_slots = ('bucket', 'channel', 'board', 'version', 'milestone', 'image_type',
'uri')
def __str__(self):
if self.uri:
return '%s' % self.uri.split('/')[-1]
else:
return ('Unsigned image archive: %s:%s/%s/%s-%s/%s (no uri)' %
(self.bucket, self.board, self.channel,
self.milestone, self.version,
self.image_type))
class Payload(utils.RestrictedAttrDict):
"""Define a ChromeOS Payload.
Fields:
tgt_image: An instance of Image saying what the payload updates to.
src_image: An instance of Image showing what it updates from. None for
Full updates.
uri: The URI of the payload. This can be any format understood by urilib.
"""
_name = 'Payload definition'
_slots = ('tgt_image', 'src_image', 'uri')
def __str__(self):
if self.uri:
return self.uri.split('/')[-1]
else:
return '%s -> %s (no uri)' % (self.src_image or 'any', self.tgt_image)
class ChromeosReleases(object):
"""Name space class for static methods for URIs in chromeos-releases."""
BUCKET = 'chromeos-releases'
# Build flags
SKIP = 'SKIP'
FINISHED = 'FINISHED'
LOCK = 'LOCK'
FLAGS = (SKIP, FINISHED, LOCK)
UNSIGNED_IMAGE_TYPES = ('test', 'recovery')
@staticmethod
def BuildUri(channel, board, version, bucket=None):
"""Creates the gspath for a given build.
Args:
channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build artifacts. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3
"""
if not bucket:
bucket = ChromeosReleases.BUCKET
return 'gs://%s/%s/%s/%s' % (bucket, channel, board, version)
@staticmethod
def GeneratorUri(channel, board, version, bucket=None):
"""Creates the gspath for a given build image.
Args:
channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? Usually "chromeos-releases".
Returns:
The url for the specified build's delta generator zip file.
"""
return os.path.join(ChromeosReleases.BuildUri(channel,
board,
version,
bucket=bucket),
'au-generator.zip')
@staticmethod
def BuildPayloadsUri(channel, board, version, bucket=None):
"""Creates the gspath for the payloads of a given build.
Args:
channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads
"""
return os.path.join(ChromeosReleases.BuildUri(channel,
board,
version,
bucket=bucket),
'payloads')
@staticmethod
def BuildPayloadsSigningUri(channel, board, version, bucket=None):
"""Creates the base gspath for payload signing files.
We create a number of files during signer interaction. This method creates
the base path for all such files associated with a given build. There
should still be subdirectories per-payload to avoid collisions, but by
using this uniform base path, cleanup can be more reliable.
Args:
channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/signing
"""
return os.path.join(ChromeosReleases.BuildPayloadsUri(channel,
board,
version,
bucket=bucket),
'signing')
@staticmethod
def BuildPayloadsFlagUri(channel, board, version, flag, bucket=None):
"""Creates the gspath for a given build flag.
SKIP - means a given build is bad and should not have payloads generated.
FINISHED - means that the payloads have been fully generated.
LOCK - means that payload processing is in progress on the host which
owns the locks. Locks have a timeout associated with them in
case of error, but are not 100% atomic when a lock is timing out.
Args:
channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
flag: gs_paths.SKIP, gs_paths.FINISHED, or gs_paths.LOCK
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/SKIP_FLAG
"""
assert flag in ChromeosReleases.FLAGS
return os.path.join(ChromeosReleases.BuildPayloadsUri(channel,
board,
version,
bucket=bucket),
'%s_flag' % flag)
@staticmethod
def ImageName(channel, board, version, key):
"""Creates the base file name for a given build image.
Args:
channel: What channel does the build belong to. Usually xxx-channel.
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
key: What is the signing key. "premp", "mp", "mp-v2", etc
Returns:
The name of the specified image. Should be of the form:
chromeos_1.2.3_board-name_recovery_blah-channel_key.bin
"""
template = 'chromeos_%(version)s_%(board)s_recovery_%(channel)s_%(key)s.bin'
return template % {
'channel': channel,
'board': board,
'version': version,
'key': key,
}
@staticmethod
def UnsignedImageArchiveName(board, version, milestone, image_type):
"""The base name for the tarball containing an unsigned build image.
Args:
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version? "3015.0.0", "1945.76.3", etc
milestone: the most recent branch corresponding to the version; "R19" etc
image_type: either "recovery" or "test", currently
Returns:
The name of the specified image archive. Should be of the form:
ChromeOS-type-R19-1.2.3-board-name.tar.xz
"""
template = (
'ChromeOS-%(image_type)s-%(milestone)s-%(version)s-%(board)s.tar.xz')
return template % {
'board': board,
'version': version,
'milestone': milestone,
'image_type': image_type,
}
@staticmethod
def ImageUri(channel, board, version, key,
image_channel=None, image_version=None,
bucket=None):
"""Creates the gspath for a given build image.
Args:
channel: What channel does the build belong to? Usually "xxx-channel"
board: What board is the build for? "x86-alex", "lumpy", etc
version: What is the build version? "3015.0.0", "1945.76.3", etc
key: What is the signing key? "premp", "mp", "mp-v2", etc
image_channel: Sometimes an image has a different channel than the build
directory it's in. (ie: nplusone).
image_version: Sometimes an image has a different version than the build
directory it's in. (ie: nplusone).
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's image. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/
chromeos_1.2.3_board-name_recovery_blah-channel_key.bin
"""
if not image_channel:
image_channel = channel
if not image_version:
image_version = version
return os.path.join(
ChromeosReleases.BuildUri(channel, board, version, bucket=bucket),
ChromeosReleases.ImageName(image_channel, board, image_version, key))
@staticmethod
def UnsignedImageArchiveUri(channel, board, version, milestone, image_type,
bucket=None):
"""Creates the gspath for a given unsigned build image archive.
Args:
channel: What channel does the build belong to? Usually "xxx-channel"
board: What board is the build for? "x86-alex", "lumpy", etc
version: What is the build version? "3015.0.0", "1945.76.3", etc
milestone: the most recent branch corresponding to the version; "R19" etc
image_type: either "recovery" or "test", currently
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's image. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/
ChromeOS-type-R19-1.2.3-board-name.tar.xz
"""
return os.path.join(
ChromeosReleases.BuildUri(channel, board, version, bucket=bucket),
ChromeosReleases.UnsignedImageArchiveName(board, version,
milestone, image_type))
@classmethod
def ParseImageUri(cls, image_uri):
"""Parse the URI of an image into an Image object."""
# The named values in this regex must match the arguments to gspaths.Image.
exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
r'(?P<version>.*)/chromeos_(?P<image_version>[^_]+)_'
r'(?P=board)_recovery_(?P<image_channel>[^_]+)_(?P<key>[^_]+).bin$')
m = re.match(exp, image_uri)
if not m:
return None
values = m.groupdict()
# Insert the URI
values['uri'] = image_uri
# Create an Image object using the values we parsed out.
return Image(values)
@classmethod
def ParseUnsignedImageArchiveUri(cls, image_uri):
"""Parse the URI of an image into an UnsignedImageArchive object."""
# The named values in this regex must match the arguments to gspaths.Image.
exp = (r'gs://(?P<bucket>[^/]+)/(?P<channel>[^/]+)/'
r'(?P<board>[^/]+)/(?P<version>[^/]+)/'
r'ChromeOS-(?P<image_type>%s)-(?P<milestone>R[0-9]+)-'
r'(?P=version)-(?P=board).tar.xz' %
'|'.join(cls.UNSIGNED_IMAGE_TYPES))
m = re.match(exp, image_uri)
if not m:
return None
values = m.groupdict()
# Insert the URI
values['uri'] = image_uri
# Reset values if they match their defaults.
if values['bucket'] == cls.BUCKET:
values['bucket'] = None
# Create an Image object using the values we parsed out.
return UnsignedImageArchive(values)
@staticmethod
def PayloadName(channel, board, version, key=None, random_str=None,
src_version=None, unsigned_image_type='test'):
"""Creates the gspath for a payload associated with a given build.
Args:
channel: What channel does the build belong to? Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version? "3015.0.0", "1945.76.3", etc
key: What is the signing key? "premp", "mp", "mp-v2", etc; None (default)
indicates that the image is not signed, e.g. a test image
random_str: Force a given random string. None means generate one.
src_version: If this payload is a delta, this is the version of the image
it updates from.
unsigned_image_type: the type descriptor (string) of an unsigned image;
significant iff key is None (default: "test")
Returns:
The name for the specified build's payloads. Should be of the form:
chromeos_0.12.433.257-2913.377.0_x86-alex_stable-channel_
delta_mp-v3.bin-b334762d0f6b80f471069153bbe8b97a.signed
chromeos_2913.377.0_x86-alex_stable-channel_full_mp-v3.
bin-610c97c30fae8561bde01a6116d65cb9.signed
"""
if random_str is None:
random.seed()
random_str = hashlib.md5(str(random.getrandbits(128))).hexdigest()
if key is None:
signed_ext = ''
key = unsigned_image_type
else:
signed_ext = '.signed'
if src_version:
template = ('chromeos_%(src_version)s-%(version)s_%(board)s_%(channel)s_'
'delta_%(key)s.bin-%(random_str)s%(signed_ext)s')
return template % {
'channel': channel,
'board': board,
'version': version,
'key': key,
'random_str': random_str,
'src_version': src_version,
'signed_ext': signed_ext,
}
else:
template = ('chromeos_%(version)s_%(board)s_%(channel)s_'
'full_%(key)s.bin-%(random_str)s%(signed_ext)s')
return template % {
'channel': channel,
'board': board,
'version': version,
'key': key,
'random_str': random_str,
'signed_ext': signed_ext,
}
@staticmethod
def PayloadUri(channel, board, version, random_str, key=None,
image_channel=None, image_version=None,
src_version=None,
bucket=None):
"""Creates the gspath for a payload associated with a given build.
Args:
channel: What channel does the build belong to? Usually "xxx-channel"
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version? "3015.0.0", "1945.76.3", etc
key: What is the signing key? "premp", "mp", "mp-v2", etc; None means
that the image is unsigned (e.g. a test image)
image_channel: Sometimes an image has a different channel than the build
directory it's in. (ie: nplusone).
image_version: Sometimes an image has a different version than the build
directory it's in. (ie: nplusone).
random_str: Force a given random string. None means generate one.
src_version: If this payload is a delta, this is the version of the image
it updates from.
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/stable-channel/x86-alex/2913.377.0/payloads/
chromeos_0.12.433.257-2913.377.0_x86-alex_stable-channel_
delta_mp-v3.bin-b334762d0f6b80f471069153bbe8b97a.signed
gs://chromeos-releases/stable-channel/x86-alex/2913.377.0/payloads/
chromeos_2913.377.0_x86-alex_stable-channel_full_mp-v3.
bin-610c97c30fae8561bde01a6116d65cb9.signed
"""
if image_channel is None:
image_channel = channel
if image_version is None:
image_version = version
return os.path.join(ChromeosReleases.BuildPayloadsUri(channel,
board,
version,
bucket=bucket),
ChromeosReleases.PayloadName(image_channel,
board,
image_version,
key,
random_str,
src_version))
@classmethod
def ParsePayloadUri(cls, payload_uri):
"""Parse the URI of an image into an Image object."""
# Sample Delta URI:
# gs://chromeos-releases/stable-channel/x86-mario/4731.72.0/payloads/
# chromeos_4537.147.0-4731.72.0_x86-mario_stable-channel_delta_mp-v3.bin-
# 3a90d8666d1d42b7a7367660b897e8c9.signed
# Sample Full URI:
# gs://chromeos-releases/stable-channel/x86-mario/4731.72.0/payloads/
# chromeos_4731.72.0_x86-mario_stable-channel_full_mp-v3.bin-
# 969f24ba8cbf2096ebe3c57d5f0253b7.signed
# Handle FULL payload URIs.
full_exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
r'(?P<version>.*)/payloads/chromeos_(?P<image_version>[^_]+)_'
r'(?P=board)_(?P<image_channel>[^_]+)_full_(?P<key>[^_]+)\.bin'
r'-[0-9A-Fa-f]+\.signed$')
m = re.match(full_exp, payload_uri)
if m:
image_values = m.groupdict()
# The image URIs can't be discovered from the payload URI.
image_values['uri'] = None
# Create the Payload.
tgt_image = Image(image_values)
return Payload(tgt_image=tgt_image, uri=payload_uri)
# Handle DELTA payload URIs.
delta_exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
r'(?P<version>.*)/payloads/chromeos_(?P<src_version>[^_]+)-'
r'(?P<image_version>[^_]+)_(?P=board)_'
r'(?P<image_channel>[^_]+)_delta_(?P<key>[^_]+)\.bin'
r'-[0-9A-Fa-f]+\.signed$')
m = re.match(delta_exp, payload_uri)
if m:
image_values = m.groupdict()
# The image URIs can't be discovered from the payload URI.
image_values['uri'] = None
# Remember the src_version for the src_image.
src_version = image_values['src_version']
del image_values['src_version']
# Create the payload.
tgt_image = Image(image_values)
# Set the values which are different for src versions.
image_values['version'] = src_version
# The payload URI doesn't tell us any of these values. However, it's
# a mostly safe bet that the src version has no
# image_version/image_channel.
# Not knowing the source key is problematic.
image_values['image_version'] = None
image_values['image_channel'] = None
image_values['key'] = None
src_image = Image(image_values)
return Payload(src_image=src_image, tgt_image=tgt_image, uri=payload_uri)
# The URI didn't match.
return None
class ChromeosImageArchive(object):
"""Name space class for static methods for URIs in chromeos-image-archive."""
BUCKET = 'chromeos-image-archive'
@classmethod
def BuildUri(cls, board, milestone, version, bucket=None):
"""Creates the gspath for a given build.
Args:
board: What board is the build for? "x86-alex", "lumpy", etc.
milestone: a number that defines the milestone mark, e.g. 19 for R19
version: "What is the build version. "3015.0.0", "1945.76.3", etc
bucket: the bucket the build in (None means cls.BUCKET)
Returns:
The url for the specified build artifacts. Should be of the form:
gs://chromeos-image-archive/board-release/R23-4.5.6
"""
bucket = bucket or cls.BUCKET
return 'gs://%s/%s-release/R%s-%s' % (bucket, board, milestone, version)
def VersionKey(version):
"""Convert a version string to a comparable value.
All old style values are considered older than all new style values.
The actual values returned should only be used for comparison against
other VersionKey results.
Args:
version: String with a build version "1.2.3" or "0.12.3.4"
Returns:
A value comparable against other version strings.
"""
key = [int(n) for n in version.split('.')]
# 3 number versions are new style.
# 4 number versions are old style.
assert len(key) in (3, 4)
if len(key) == 3:
# 1.2.3 -> (1, 0, 1, 2, 3)
return [1, 0] + key
else:
# 0.12.3.4 -> (0, 0, 12, 3, 4)
return [0] + key
def VersionGreater(left, right):
"""Compare two version strings. left > right
Args:
left: String with lefthand version string "1.2.3" or "0.12.3.4"
right: String with righthand version string "1.2.3" or "0.12.3.4"
Returns:
left > right taking into account new style versions versus old style.
"""
return VersionKey(left) > VersionKey(right)
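# Illustrative usage sketch, not part of the original chromite module: the
# helpers above are pure string/URI manipulation, so they can be exercised
# without touching Google Storage. The channel/board/version values are made up.
def _gspaths_examples():
    """Return a few example values computed with the helpers above."""
    build_uri = ChromeosReleases.BuildUri('stable-channel', 'x86-alex', '2913.377.0')
    # -> gs://chromeos-releases/stable-channel/x86-alex/2913.377.0
    flag_uri = ChromeosReleases.BuildPayloadsFlagUri(
        'stable-channel', 'x86-alex', '2913.377.0', ChromeosReleases.FINISHED)
    # -> .../2913.377.0/payloads/FINISHED_flag
    # New-style (3 part) versions always compare greater than old-style (4 part).
    newer = VersionGreater('1.2.3', '0.12.3.4')
    return build_uri, flag_uri, newer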
|