from core import C
from singleton import S
from operations import AssocOp
from cache import cacheit
from numbers import ilcm, igcd
from collections import defaultdict
class Add(AssocOp):
__slots__ = []
is_Add = True
#identity = S.Zero
# cyclic import, so defined in numbers.py
@classmethod
def flatten(cls, seq):
"""
Takes the sequence "seq" of nested Adds and returns a flattened list.
Returns: (commutative_part, noncommutative_part, order_symbols)
Applies associativity; all terms are commutative with respect to
addition.
NB: the removal of 0 is already handled by AssocOp.__new__
See also
========
sympy.core.mul.Mul.flatten
"""
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
assert a
if a.is_Rational:
if b.is_Mul:
# if it's an unevaluated 2-arg, expand it
c, t = b.as_coeff_Mul()
if t.is_Add:
h, t = t.as_coeff_Add()
bargs = [c*ti for ti in Add.make_args(t)]
bargs.sort(key=hash)
ch = c*h
if ch:
bargs.insert(0, ch)
b = Add._from_args(bargs)
if b.is_Add:
bargs = list(b.args)
if bargs[0].is_Number:
bargs[0] += a
if not bargs[0]:
bargs.pop(0)
else:
bargs.insert(0, a)
rv = bargs, [], None
elif b.is_Mul:
rv = [a, b], [], None
if rv:
if all(s.is_commutative for s in rv[0]):
return rv
return [], rv[0], None
terms = {} # term -> coeff
# e.g. x**2 -> 5 for ... + 5*x**2 + ...
coeff = S.Zero # standalone term (Number or zoo will always be in slot 0)
# e.g. 3 + ...
order_factors = []
for o in seq:
# O(x)
if o.is_Order:
for o1 in order_factors:
if o1.contains(o):
o = None
break
if o is None:
continue
order_factors = [o]+[o1 for o1 in order_factors if not o.contains(o1)]
continue
# 3 or NaN
elif o.is_Number:
if o is S.NaN or coeff is S.ComplexInfinity and o.is_bounded is False:
# we know for sure the result will be nan
return [S.NaN], [], None
if coeff.is_Number:
coeff += o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif o is S.ComplexInfinity:
if coeff.is_bounded is False:
# we know for sure the result will be nan
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
# Add([...])
elif o.is_Add:
# NB: here we assume Add is always commutative
seq.extend(o.args) # TODO zerocopy?
continue
# Mul([...])
elif o.is_Mul:
c, s = o.as_coeff_Mul()
# 3*...
# unevaluated 2-arg Mul, but we always unfold it so
# it can combine with other terms (just like is done
# with the Pow below)
if c.is_Number and s.is_Add:
seq.extend([c*a for a in s.args])
continue
# check for unevaluated Pow, e.g. 2**3 or 2**(-1/2)
elif o.is_Pow:
b, e = o.as_base_exp()
if b.is_Number and (e.is_Integer or (e.is_Rational and e.is_negative)):
seq.append(b**e)
continue
c, s = S.One, o
else:
# everything else
c = S.One
s = o
# now we have:
# o = c*s, where
#
# c is a Number
# s is an expression with number factor extracted
# let's collect terms with the same s, so e.g.
# 2*x**2 + 3*x**2 -> 5*x**2
if s in terms:
terms[s] += c
else:
terms[s] = c
# now let's construct new args:
# [2*x**2, x**3, 7*x**4, pi, ...]
newseq = []
noncommutative = False
for s,c in terms.items():
# 0*s
if c is S.Zero:
continue
# 1*s
elif c is S.One:
newseq.append(s)
# c*s
else:
if s.is_Mul:
# Mul, already keeps its arguments in perfect order.
# so we can simply put c in slot0 and go the fast way.
cs = s._new_rawargs(*((c,) + s.args))
newseq.append(cs)
else:
# alternatively we have to call all Mul's machinery (slow)
newseq.append(Mul(c,s))
noncommutative = noncommutative or not s.is_commutative
# oo, -oo
if coeff is S.Infinity:
newseq = [f for f in newseq if not (f.is_nonnegative or f.is_real and
(f.is_bounded or
f.is_finite or
f.is_infinitesimal))]
elif coeff is S.NegativeInfinity:
newseq = [f for f in newseq if not (f.is_nonpositive or f.is_real and
(f.is_bounded or
f.is_finite or
f.is_infinitesimal))]
if coeff is S.ComplexInfinity:
# zoo might be
# unbounded_real + bounded_im
# bounded_real + unbounded_im
# unbounded_real + unbounded_im
# addition of a bounded real or imaginary number won't be able to
# change the zoo nature; if unbounded a NaN condition could result if
# the unbounded symbol had sign opposite of the unbounded portion of zoo,
# e.g. unbounded_real - unbounded_real
newseq = [c for c in newseq if not (c.is_bounded and
c.is_real is not None)]
# process O(x)
if order_factors:
newseq2 = []
for t in newseq:
for o in order_factors:
# x + O(x) -> O(x)
if o.contains(t):
t = None
break
# x + O(x**2) -> x + O(x**2)
if t is not None:
newseq2.append(t)
newseq = newseq2 + order_factors
# 1 + O(1) -> O(1)
for o in order_factors:
if o.contains(coeff):
coeff = S.Zero
break
# order args canonically
# Currently we sort things using hashes, as it is quite fast. A better
# solution is not to sort things at all - but this needs some more
fixing. NOTE: this is used in primitive and Mul.flatten, too, so if
# it changes here it should be changed there.
newseq.sort(key=hash)
# current code expects coeff to be first
if coeff is not S.Zero:
newseq.insert(0, coeff)
# we are done
if noncommutative:
return [], newseq, None
else:
return newseq, [], None
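# Worked sketch of the collection above (illustrative, not part of the
# original method).  For seq = [3, 2*x**2, 3*x**2, x]:
#   coeff accumulates the standalone Number            -> 3
#   terms maps each non-Number part to its coefficient -> {x**2: 5, x: 1}
#   newseq is rebuilt as [5*x**2, x] (hash order), coeff is inserted in
#   slot 0, and the commutative result ([3, 5*x**2, x], [], None) is returned.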
@classmethod
def class_key(cls):
"""Nice order of classes"""
return 3, 1, cls.__name__
def as_coefficients_dict(a):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
d = defaultdict(list)
for ai in a.args:
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.iteritems():
if len(v) == 1:
d[k] = v[0]
else:
d[k] = Add(*v)
di = defaultdict(int)
di.update(d)
return di
@cacheit
def as_coeff_add(self, *deps):
"""
Returns a tuple (coeff, args) where self is treated as an Add and coeff
is the Number term and args is a tuple of all other terms.
Examples
========
>>> from sympy.abc import x, y
>>> (7 + 3*x).as_coeff_add()
(7, (3*x,))
>>> (7*x).as_coeff_add()
(0, (7*x,))
"""
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
coeff, notrat = self.args[0].as_coeff_add()
if coeff is not S.Zero:
return coeff, notrat + self.args[1:]
return S.Zero, self.args
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number:
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
else:
return S.Zero, self
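# Illustrative examples (not in the original docstring): for 7 + 3*x this
# returns (7, 3*x); for x + y, whose leading arg is not a Number, it returns
# (S.Zero, x + y), i.e. a zero coefficient and self unchanged.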
# Note, we intentionally do not implement Add.as_coeff_mul(). Rather, we
# let Expr.as_coeff_mul() just always return (S.One, self) for an Add. See
# issue 2425.
def _eval_derivative(self, s):
return Add(*[f.diff(s) for f in self.args])
def _eval_nseries(self, x, n, logx):
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
return Add(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w+3).matches('x+5') -> {w: x+2}
coeff, terms = self.as_coeff_add()
if len(terms)==1:
return terms[0].matches(expr - coeff, repl_dict)
return
matches = AssocOp._matches_commutative
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs - rhs, but treats arguments like symbols, so things like
oo - oo return 0, instead of a nan.
"""
from sympy import oo, I, expand_mul
if lhs == oo and rhs == oo or lhs == oo*I and rhs == oo*I:
return S.Zero
return expand_mul(lhs - rhs)
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
self.as_coeff_add() which gives the head and a tuple containing
the arguments of the tail when treated as an Add.
- if you want the coefficient when self is treated as a Mul
then use self.as_coeff_mul()[0]
>>> from sympy.abc import x, y
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
if len(self.args) == 1:
return S.Zero, self
return self.args[0], self._new_rawargs(*self.args[1:])
def as_numer_denom(self):
# clear rational denominator
content, expr = self.primitive()
ncon, dcon = content.as_numer_denom()
# collect numerators and denominators of the terms
nd = defaultdict(list)
for f in expr.args:
ni, di = f.as_numer_denom()
nd[di].append(ni)
# put infinity in the numerator
if S.Zero in nd:
n = nd.pop(S.Zero)
assert len(n) == 1
n = n[0]
nd[S.One].append(n/S.Zero)
# check for quick exit
if len(nd) == 1:
d, n = nd.popitem()
return Add(*[_keep_coeff(ncon, ni) for ni in n]), _keep_coeff(dcon, d)
# sum up the terms having a common denominator
for d, n in nd.iteritems():
if len(n) == 1:
nd[d] = n[0]
else:
nd[d] = Add(*n)
# assemble single numerator and denominator
denoms, numers = [list(i) for i in zip(*nd.iteritems())]
n, d = Add(*[Mul(*(denoms[:i]+[numers[i]]+denoms[i+1:]))
for i in xrange(len(numers))]), Mul(*denoms)
return _keep_coeff(ncon, n), _keep_coeff(dcon, d)
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
# assumption methods
_eval_is_real = lambda self: self._eval_template_is_attr('is_real', when_multiple=None)
_eval_is_antihermitian = lambda self: self._eval_template_is_attr('is_antihermitian', when_multiple=None)
_eval_is_bounded = lambda self: self._eval_template_is_attr('is_bounded', when_multiple=None)
_eval_is_hermitian = lambda self: self._eval_template_is_attr('is_hermitian', when_multiple=None)
_eval_is_imaginary = lambda self: self._eval_template_is_attr('is_imaginary', when_multiple=None)
_eval_is_integer = lambda self: self._eval_template_is_attr('is_integer', when_multiple=None)
_eval_is_commutative = lambda self: self._eval_template_is_attr('is_commutative')
def _eval_is_odd(self):
l = [f for f in self.args if not (f.is_even==True)]
if not l:
return False
if l[0].is_odd:
return self._new_rawargs(*l[1:]).is_even
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _eval_is_positive(self):
if self.is_number:
return super(Add, self)._eval_is_positive()
pos = nonneg = nonpos = unknown_sign = False
unbounded = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
ispos = a.is_positive
ubound = a.is_unbounded
if ubound:
unbounded.add(ispos)
if len(unbounded) > 1:
return None
if ispos:
pos = True
continue
elif a.is_nonnegative:
nonneg = True
continue
elif a.is_nonpositive:
nonpos = True
continue
elif a.is_zero:
continue
if ubound is None:
# sign is unknown; if we don't know the boundedness
# we're done: we don't know. That is technically true,
# but the only option is that we have something like
# oo - oo which is NaN and it really doesn't matter
# what sign we apply to that because it (when finally
# computed) will trump any sign. So instead of returning
# None, we pass.
pass
else:
return None
unknown_sign = True
if unbounded:
return unbounded.pop()
elif unknown_sign:
return None
elif not nonpos and not nonneg and pos:
return True
elif not nonpos and pos:
return True
elif not pos and not nonneg:
return False
def _eval_is_negative(self):
if self.is_number:
return super(Add, self)._eval_is_negative()
neg = nonpos = nonneg = unknown_sign = False
unbounded = set()
args = [a for a in self.args if not a.is_zero]
if not args:
return False
for a in args:
isneg = a.is_negative
ubound = a.is_unbounded
if ubound:
unbounded.add(isneg)
if len(unbounded) > 1:
return None
if isneg:
neg = True
continue
elif a.is_nonpositive:
nonpos = True
continue
elif a.is_nonnegative:
nonneg = True
continue
elif a.is_zero:
continue
if ubound is None:
# sign is unknown; if we don't know the boundedness
# we're done: we don't know. That is technically true,
# but the only option is that we have something like
# oo - oo which is NaN and it really doesn't matter
# what sign we apply to that because it (when finally
# computed) will trump any sign. So instead of returning
# None, we pass.
pass
unknown_sign = True
if unbounded:
return unbounded.pop()
elif unknown_sign:
return None
elif not nonneg and not nonpos and neg:
return True
elif not nonneg and neg:
return True
elif not neg and not nonpos:
return False
def _eval_subs(self, old, new):
if not old.is_Add:
return None
coeff_self, terms_self = self.as_coeff_Add()
coeff_old, terms_old = old.as_coeff_Add()
if coeff_self.is_Rational and coeff_old.is_Rational:
if terms_self == terms_old: # (2 + a).subs( 3 + a, y) -> -1 + y
return Add( new, coeff_self, -coeff_old)
if terms_self == -terms_old: # (2 + a).subs(-3 - a, y) -> -1 - y
return Add(-new, coeff_self, coeff_old)
if coeff_self.is_Rational and coeff_old.is_Rational \
or coeff_self == coeff_old:
args_old, args_self = Add.make_args(terms_old), Add.make_args(terms_self)
if len(args_old) < len(args_self): # (a+b+c+d).subs(b+c,x) -> a+x+d
self_set = set(args_self)
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return Add(new, coeff_self, -coeff_old,
*[s._subs(old, new) for s in ret_set])
args_old = Add.make_args(-terms_old) # (a+b+c+d).subs(-b-c,x) -> a-x+d
old_set = set(args_old)
if old_set < self_set:
ret_set = self_set - old_set
return Add(-new, coeff_self, coeff_old,
*[s._subs(old, new) for s in ret_set])
def removeO(self):
args = [a for a in self.args if not a.is_Order]
return self._new_rawargs(*args)
def getO(self):
args = [a for a in self.args if a.is_Order]
if args:
return self._new_rawargs(*args)
@cacheit
def extract_leading_order(self, *symbols):
"""
Returns the leading term and its order.
Examples
========
>>> from sympy.abc import x
>>> (x + 1 + 1/x**5).extract_leading_order(x)
((x**(-5), O(x**(-5))),)
>>> (1 + x).extract_leading_order(x)
((1, O(1)),)
>>> (x + x**2).extract_leading_order(x)
((x, O(x)),)
"""
lst = []
seq = [(f, C.Order(f, *symbols)) for f in self.args]
for ef, of in seq:
for e, o in lst:
if o.contains(of) and o != of:
of = None
break
if of is None:
continue
new_lst = [(ef, of)]
for e, o in lst:
if of.contains(o) and o != of:
continue
new_lst.append((e, o))
lst = new_lst
return tuple(lst)
def as_real_imag(self, deep=True, **hints):
"""
Returns a tuple representing a complex number.
Examples
========
>>> from sympy import I
>>> (7 + 9*I).as_real_imag()
(7, 9)
"""
sargs, terms = self.args, []
re_part, im_part = [], []
for term in sargs:
re, im = term.as_real_imag(deep=deep)
re_part.append(re)
im_part.append(im)
return (self.func(*re_part), self.func(*im_part))
def _eval_as_leading_term(self, x):
from sympy import expand_mul, factor_terms
old = self
self = expand_mul(self)
if not self.is_Add:
return self.as_leading_term(x)
unbounded = [t for t in self.args if t.is_unbounded]
if unbounded:
return Add._from_args(unbounded)
self = Add(*[t.as_leading_term(x) for t in self.args]).removeO()
if not self:
# simple leading term analysis gave us 0 but we have to send
# back a term, so compute the leading term (via series)
return old.compute_leading_term(x)
elif not self.is_Add:
return self
else:
plain = Add(*[s for s, _ in self.extract_leading_order(x)])
rv = factor_terms(plain, fraction=False)
rv_fraction = factor_terms(rv, fraction=True)
# if it simplifies to an x-free expression, return that;
# tests don't fail if we don't but it seems nicer to do this
if x not in rv_fraction.free_symbols:
return rv_fraction
return rv
def _eval_adjoint(self):
return Add(*[t.adjoint() for t in self.args])
def _eval_conjugate(self):
return Add(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return Add(*[t.transpose() for t in self.args])
def __neg__(self):
return Add(*[-t for t in self.args])
def _sage_(self):
s = 0
for x in self.args:
s += x._sage_()
return s
def primitive(self):
"""
Return ``(R, self/R)`` where ``R`` is the Rational GCD of ``self``.
``R`` is collected only from the leading coefficient of each term.
Examples
========
>>> from sympy.abc import x, y
>>> (2*x + 4*y).primitive()
(2, x + 2*y)
>>> (2*x/3 + 4*y/9).primitive()
(2/9, 3*x + 2*y)
>>> (2*x/3 + 4.2*y).primitive()
(1/3, 2*x + 12.6*y)
No subprocessing of term factors is performed:
>>> ((2 + 2*x)*x + 2).primitive()
(1, x*(2*x + 2) + 2)
Recursive subprocessing can be done with the as_content_primitive()
method:
>>> ((2 + 2*x)*x + 2).as_content_primitive()
(2, x*(x + 1) + 1)
See also: primitive() function in polytools.py
"""
terms = []
inf = False
for a in self.args:
c, m = a.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = a
inf = inf or m is S.ComplexInfinity
terms.append((c.p, c.q, m))
if not inf:
ngcd = reduce(igcd, [t[0] for t in terms], 0)
dlcm = reduce(ilcm, [t[1] for t in terms], 1)
else:
ngcd = reduce(igcd, [t[0] for t in terms if t[1]], 0)
dlcm = reduce(ilcm, [t[1] for t in terms if t[1]], 1)
if ngcd == dlcm == 1:
return S.One, self
if not inf:
for i, (p, q, term) in enumerate(terms):
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
for i, (p, q, term) in enumerate(terms):
if q:
terms[i] = _keep_coeff(Rational((p//ngcd)*(dlcm//q)), term)
else:
terms[i] = _keep_coeff(Rational(p, q), term)
# we don't need a complete re-flattening since no new terms will join
# so we just use the same sort as is used in Add.flatten. When the
# coefficient changes, the ordering of terms may change, e.g.
# (3*x, 6*y) -> (2*y, x)
#
# We do need to make sure that term[0] stays in position 0, however.
#
if terms[0].is_Number or terms[0] is S.ComplexInfinity:
c = terms.pop(0)
else:
c = None
terms.sort(key=hash)
if c:
terms.insert(0, c)
return Rational(ngcd, dlcm), self._new_rawargs(*terms)
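# Numeric sketch of the gcd/lcm step above (illustrative): for the docstring
# example 2*x/3 + 4*y/9 the (p, q) pairs are (2, 3) and (4, 9), so
#   ngcd = igcd(2, 4) = 2 and dlcm = ilcm(3, 9) = 9.
# Each term is then rescaled by (p//ngcd)*(dlcm//q): x gets 1*3 = 3 and
# y gets 2*1 = 2, giving the documented result (2/9, 3*x + 2*y).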
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self. If radical is True (default is False) then
common radicals will be removed and included as a factor of the
primitive expression.
Examples
========
>>> from sympy import sqrt
>>> (3 + 3*sqrt(2)).as_content_primitive()
(3, 1 + sqrt(2))
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
See docstring of Expr.as_content_primitive for more examples.
"""
con, prim = Add(*[_keep_coeff(*a.as_content_primitive(radical=radical)) for a in self.args]).primitive()
if radical and prim.is_Add:
# look for common radicals that can be removed
args = prim.args
rads = []
common_q = None
for m in args:
term_rads = defaultdict(list)
for ai in Mul.make_args(m):
if ai.is_Pow:
b, e = ai.as_base_exp()
if e.is_Rational and b.is_Integer and b > 0:
term_rads[e.q].append(int(b)**e.p)
if not term_rads:
break
if common_q is None:
common_q = set(term_rads.keys())
else:
common_q = common_q & set(term_rads.keys())
if not common_q:
break
rads.append(term_rads)
else:
# process rads
# keep only those in common_q
for r in rads:
for q in r.keys():
if q not in common_q:
r.pop(q)
for q in r:
r[q] = prod(r[q])
# find the gcd of bases for each q
G = []
for q in common_q:
g = reduce(igcd, [r[q] for r in rads], 0)
if g != 1:
G.append(g**Rational(1, q))
if G:
G = Mul(*G)
args = [ai/G for ai in args]
prim = G*Add(*args)
return con, prim
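# Worked sketch of the radical branch above (illustrative): for the docstring
# example 2*sqrt(2) + 4*sqrt(10), primitive() first yields
# (2, sqrt(2) + 2*sqrt(10)).  The two terms contribute term_rads {2: [2]} and
# {2: [10]}, so common_q == set([2]) and G = igcd(2, 10)**Rational(1, 2),
# i.e. sqrt(2).  Dividing the args by G gives [1, 2*sqrt(5)], which
# reassembles to the documented result (2, sqrt(2)*(1 + 2*sqrt(5))).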
@property
def _sorted_args(self):
from sympy.utilities.misc import default_sort_key
return sorted(self.args, key=lambda w: default_sort_key(w))
from mul import Mul, _keep_coeff, prod
from sympy.core.numbers import Rational
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
import time
import unittest
import uuid
import os
import requests
import friend_docs
import user_docs
import limit_docs
def random_db_name():
return "mango_test_" + uuid.uuid4().hex
def has_text_service():
return os.path.isfile(os.getcwd() + "/../src/mango_cursor_text.erl")
class Database(object):
def __init__(self, host, port, dbname, auth=None):
self.host = host
self.port = port
self.dbname = dbname
self.sess = requests.session()
self.sess.auth = auth if auth is not None else ('testuser', 'testpass')
self.sess.headers["Content-Type"] = "application/json"
@property
def url(self):
return "http://{}:{}/{}".format(self.host, self.port, self.dbname)
def path(self, parts):
if isinstance(parts, (str, unicode)):
parts = [parts]
return "/".join([self.url] + parts)
def create(self, q=1, n=3):
r = self.sess.get(self.url)
if r.status_code == 404:
r = self.sess.put(self.url, params={"q":q, "n": n})
r.raise_for_status()
def delete(self):
r = self.sess.delete(self.url)
def recreate(self):
self.delete()
time.sleep(1)
self.create()
time.sleep(1)
def save_doc(self, doc):
self.save_docs([doc])
def save_docs(self, docs, **kwargs):
body = json.dumps({"docs": docs})
r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
r.raise_for_status()
for doc, result in zip(docs, r.json()):
doc["_id"] = result["id"]
doc["_rev"] = result["rev"]
def open_doc(self, docid):
r = self.sess.get(self.path(docid))
r.raise_for_status()
return r.json()
def ddoc_info(self, ddocid):
r = self.sess.get(self.path([ddocid, "_info"]))
r.raise_for_status()
return r.json()
def create_index(self, fields, idx_type="json", name=None, ddoc=None):
body = {
"index": {
"fields": fields
},
"type": idx_type,
"w": 3
}
if name is not None:
body["name"] = name
if ddoc is not None:
body["ddoc"] = ddoc
body = json.dumps(body)
r = self.sess.post(self.path("_index"), data=body)
r.raise_for_status()
assert r.json()["id"] is not None
assert r.json()["name"] is not None
return r.json()["result"] == "created"
def create_text_index(self, analyzer=None, selector=None, idx_type="text",
default_field=None, fields=None, name=None, ddoc=None, index_array_lengths=None):
body = {
"index": {
},
"type": idx_type,
"w": 3,
}
if name is not None:
body["name"] = name
if analyzer is not None:
body["index"]["default_analyzer"] = analyzer
if default_field is not None:
body["index"]["default_field"] = default_field
if index_array_lengths is not None:
body["index"]["index_array_lengths"] = index_array_lengths
if selector is not None:
body["selector"] = selector
if fields is not None:
body["index"]["fields"] = fields
if ddoc is not None:
body["ddoc"] = ddoc
body = json.dumps(body)
r = self.sess.post(self.path("_index"), data=body)
r.raise_for_status()
return r.json()["result"] == "created"
def list_indexes(self, limit="", skip=""):
if limit != "":
limit = "limit=" + str(limit)
if skip != "":
skip = "skip=" + str(skip)
r = self.sess.get(self.path("_index?"+limit+";"+skip))
r.raise_for_status()
return r.json()["indexes"]
def delete_index(self, ddocid, name, idx_type="json"):
path = ["_index", ddocid, idx_type, name]
r = self.sess.delete(self.path(path), params={"w":"3"})
r.raise_for_status()
def bulk_delete(self, docs):
body = {
"docids" : docs,
"w": 3
}
body = json.dumps(body)
r = self.sess.post(self.path("_index/_bulk_delete"), data=body)
return r.json()
def find(self, selector, limit=25, skip=0, sort=None, fields=None,
r=1, conflicts=False, use_index=None, explain=False,
bookmark=None, return_raw=False):
body = {
"selector": selector,
"use_index": use_index,
"limit": limit,
"skip": skip,
"r": r,
"conflicts": conflicts
}
if sort is not None:
body["sort"] = sort
if fields is not None:
body["fields"] = fields
if bookmark is not None:
body["bookmark"] = bookmark
body = json.dumps(body)
if explain:
path = self.path("_explain")
else:
path = self.path("_find")
r = self.sess.post(path, data=body)
r.raise_for_status()
if explain or return_raw:
return r.json()
else:
return r.json()["docs"]
def find_one(self, *args, **kwargs):
results = self.find(*args, **kwargs)
if len(results) > 1:
raise RuntimeError("Multiple results for Database.find_one")
if len(results):
return results[0]
else:
return None
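def _example_database_usage():
    # Illustrative sketch, not part of the original harness.  It assumes a
    # CouchDB node with the Mango API listening on 127.0.0.1:15984 that
    # accepts the testuser/testpass credentials hard-coded in Database above.
    db = Database("127.0.0.1", "15984", random_db_name())
    db.create(q=1, n=3)
    db.save_docs([{"name": "alice", "age": 30}, {"name": "bob", "age": 25}])
    db.create_index(["age"], name="age-json")
    # returns the documents whose "age" field exceeds 26
    return db.find({"age": {"$gt": 26}}, fields=["name"])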
class DbPerClass(unittest.TestCase):
@classmethod
def setUpClass(klass):
klass.db = Database("127.0.0.1", "15984", random_db_name())
klass.db.create(q=1, n=3)
def setUp(self):
self.db = self.__class__.db
class UserDocsTests(DbPerClass):
@classmethod
def setUpClass(klass):
super(UserDocsTests, klass).setUpClass()
user_docs.setup(klass.db)
class UserDocsTextTests(DbPerClass):
DEFAULT_FIELD = None
FIELDS = None
@classmethod
def setUpClass(klass):
super(UserDocsTextTests, klass).setUpClass()
if has_text_service():
user_docs.setup(
klass.db,
index_type="text",
default_field=klass.DEFAULT_FIELD,
fields=klass.FIELDS
)
class FriendDocsTextTests(DbPerClass):
@classmethod
def setUpClass(klass):
super(FriendDocsTextTests, klass).setUpClass()
if has_text_service():
friend_docs.setup(klass.db, index_type="text")
class LimitDocsTextTests(DbPerClass):
@classmethod
def setUpClass(klass):
super(LimitDocsTextTests, klass).setUpClass()
if has_text_service():
limit_docs.setup(klass.db, index_type="text")
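class ExampleUsageTests(DbPerClass):
    # Illustrative sketch of how the harness above is typically extended; it
    # is not part of the original module and the "rank" field is an assumption.
    @classmethod
    def setUpClass(klass):
        super(ExampleUsageTests, klass).setUpClass()
        klass.db.save_docs([{"type": "example", "rank": i} for i in range(5)])
        klass.db.create_index(["rank"])
    def test_find_by_rank(self):
        docs = self.db.find({"rank": {"$gte": 3}}, sort=[{"rank": "asc"}])
        assert len(docs) == 2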
"""SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import imp
import sys
import re
import os
import shutil
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
DefaultToolpath=[]
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
".h", ".H", ".hxx", ".hpp", ".hh",
".F", ".fpp", ".FPP",
".m", ".mm",
".S", ".spp", ".SPP", ".sx"]
DSuffixes = ['.d']
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
for suffix in CSuffixes:
SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
SourceFileScanner.add_scanner(suffix, DScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
SourceFileScanner.add_scanner(suffix, LaTeXScanner)
SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool(object):
def __init__(self, name, toolpath=[], **kw):
self.name = name
self.toolpath = toolpath + DefaultToolpath
# remember these so we can merge them into the call
self.init_kw = kw
module = self._tool_module()
self.generate = module.generate
self.exists = module.exists
if hasattr(module, 'options'):
self.options = module.options
def _tool_module(self):
# TODO: Interchange zipimport with normal initialization for better error reporting
oldpythonpath = sys.path
sys.path = self.toolpath + sys.path
try:
try:
file, path, desc = imp.find_module(self.name, self.toolpath)
try:
return imp.load_module(self.name, file, path, desc)
finally:
if file:
file.close()
except ImportError, e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
except ImportError:
pass
else:
for aPath in self.toolpath:
try:
importer = zipimport.zipimporter(aPath)
return importer.load_module(self.name)
except ImportError, e:
pass
finally:
sys.path = oldpythonpath
full_name = 'SCons.Tool.' + self.name
try:
return sys.modules[full_name]
except KeyError:
try:
smpath = sys.modules['SCons.Tool'].__path__
try:
file, path, desc = imp.find_module(self.name, smpath)
module = imp.load_module(full_name, file, path, desc)
setattr(SCons.Tool, self.name, module)
if file:
file.close()
return module
except ImportError, e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
module = importer.load_module(full_name)
setattr(SCons.Tool, self.name, module)
return module
except ImportError, e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
except ImportError, e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
def __call__(self, env, *args, **kw):
if self.init_kw is not None:
# Merge call kws into init kws;
# but don't bash self.init_kw.
if kw is not None:
call_kw = kw
kw = self.init_kw.copy()
kw.update(call_kw)
else:
kw = self.init_kw
env.Append(TOOLS = [ self.name ])
if hasattr(self, 'options'):
import SCons.Variables
if 'options' not in env:
from SCons.Script import ARGUMENTS
env['options']=SCons.Variables.Variables(args=ARGUMENTS)
opts=env['options']
self.options(opts)
opts.Update(env)
self.generate(env, *args, **kw)
def __str__(self):
return self.name
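def _example_tool_usage(tool_name='gcc'):
    # Illustrative sketch, not part of SCons: applying a Tool object to a
    # construction environment by hand.  'gcc' is only an example name; any
    # tool module reachable through the tool path works the same way.
    import SCons.Environment
    env = SCons.Environment.Environment(tools=[])
    tool = Tool(tool_name)
    if tool.exists(env):
        # runs the tool module's generate(env) and records the name in env['TOOLS']
        tool(env)
    return env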
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
"""This is a utility function that creates the Program
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
program = env['BUILDERS']['Program']
except KeyError:
import SCons.Defaults
program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
emitter = '$PROGEMITTER',
prefix = '$PROGPREFIX',
suffix = '$PROGSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'Object',
target_scanner = ProgramScanner)
env['BUILDERS']['Program'] = program
return program
def createStaticLibBuilder(env):
"""This is a utility function that creates the StaticLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
static_lib = env['BUILDERS']['StaticLibrary']
except KeyError:
action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ]
if env.Detect('ranlib'):
ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR")
action_list.append(ranlib_action)
static_lib = SCons.Builder.Builder(action = action_list,
emitter = '$LIBEMITTER',
prefix = '$LIBPREFIX',
suffix = '$LIBSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'StaticObject')
env['BUILDERS']['StaticLibrary'] = static_lib
env['BUILDERS']['Library'] = static_lib
return static_lib
def VersionShLibLinkNames(version, libname, env):
"""Generate names of symlinks to the versioned shared library"""
Verbose = False
platform = env.subst('$PLATFORM')
shlib_suffix = env.subst('$SHLIBSUFFIX')
shlink_flags = SCons.Util.CLVar(env.subst('$SHLINKFLAGS'))
linknames = []
if version.count(".") != 2:
# We need a version string of the form x.y.z to proceed
# Several changes need to be made to support versions like x.y
raise ValueError
if platform == 'darwin':
# For libfoo.x.y.z.dylib, the only linkname is libfoo.dylib
suffix_re = re.escape('.' + version + shlib_suffix)
linkname = re.sub(suffix_re, shlib_suffix, libname)
if Verbose:
print "VersionShLibLinkNames: linkname = ",linkname
linknames.append(linkname)
elif platform == 'posix' or platform == 'sunos':
if sys.platform.startswith('openbsd'):
# OpenBSD uses x.y shared library versioning numbering convention
# and doesn't use symlinks to backwards-compatible libraries
return []
# For libfoo.so.x.y.z, linknames libfoo.so libfoo.so.x.y libfoo.so.x
suffix_re = re.escape(shlib_suffix + '.' + version)
# First linkname has no version number
linkname = re.sub(suffix_re, shlib_suffix, libname)
if Verbose:
print "VersionShLibLinkNames: linkname = ",linkname
linknames.append(linkname)
versionparts = version.split('.')
major_name = linkname + "." + versionparts[0]
minor_name = major_name + "." + versionparts[1]
#Only add link for major_name
#for linkname in [major_name, minor_name]:
for linkname in [major_name, ]:
if Verbose:
print "VersionShLibLinkNames: linkname ",linkname, ", target ",libname
linknames.append(linkname)
# note: no Windows case here (win32 or cygwin);
# MSVC doesn't support this type of versioned shared libs.
# (could probably do something for MinGW though)
return linknames
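# Worked example of the function above (illustrative): on a 'posix' platform
# with shlib_suffix '.so', version '1.2.3' and libname 'libfoo.so.1.2.3',
# re.sub(re.escape('.so.1.2.3'), '.so', libname) gives 'libfoo.so', and the
# major link adds 'libfoo.so.1', so linknames == ['libfoo.so', 'libfoo.so.1'].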
def VersionedSharedLibrary(target = None, source= None, env=None):
"""Build a shared library. If the environment has SHLIBVERSION
defined make a versioned shared library and create the appropriate
symlinks for the platform we are on"""
Verbose = False
try:
version = env.subst('$SHLIBVERSION')
except KeyError:
version = None
# libname includes the version number if one was given
libname = getattr(target[0].attributes, 'shlibname', target[0].name)
platform = env.subst('$PLATFORM')
shlib_suffix = env.subst('$SHLIBSUFFIX')
shlink_flags = SCons.Util.CLVar(env.subst('$SHLINKFLAGS'))
if Verbose:
print "VersionShLib: libname = ",libname
print "VersionShLib: platform = ",platform
print "VersionShLib: shlib_suffix = ",shlib_suffix
print "VersionShLib: target = ",str(target[0])
if version:
# set the shared library link flags
if platform == 'posix':
shlink_flags += [ '-Wl,-Bsymbolic' ]
# OpenBSD doesn't usually use SONAME for libraries
if not sys.platform.startswith('openbsd'):
# continue setup of shlink flags for all other POSIX systems
suffix_re = re.escape(shlib_suffix + '.' + version)
(major, age, revision) = version.split(".")
# soname will have only the major version number in it
soname = re.sub(suffix_re, shlib_suffix, libname) + '.' + major
shlink_flags += [ '-Wl,-soname=%s' % soname ]
if Verbose:
print " soname ",soname,", shlink_flags ",shlink_flags
elif platform == 'sunos':
suffix_re = re.escape(shlib_suffix + '.' + version)
(major, age, revision) = version.split(".")
soname = re.sub(suffix_re, shlib_suffix, libname) + '.' + major
shlink_flags += [ '-h', soname ]
elif platform == 'cygwin':
shlink_flags += [ '-Wl,-Bsymbolic',
'-Wl,--out-implib,${TARGET.base}.a' ]
elif platform == 'darwin':
shlink_flags += [ '-current_version', '%s' % version,
'-compatibility_version', '%s' % version,
'-undefined', 'dynamic_lookup' ]
if Verbose:
print "VersionShLib: shlink_flags = ",shlink_flags
envlink = env.Clone()
envlink['SHLINKFLAGS'] = shlink_flags
else:
envlink = env
result = SCons.Defaults.ShLinkAction(target, source, envlink)
if version:
# here we need the full pathname so the links end up in the right directory
libname = getattr(target[0].attributes, 'shlibpath', target[0].path)
if Verbose:
print "VerShLib: target lib is = ", libname
print "VerShLib: name is = ", target[0].name
print "VerShLib: dir is = ", target[0].dir.path
linknames = VersionShLibLinkNames(version, libname, env)
if Verbose:
print "VerShLib: linknames ",linknames
# Here we just need the file name w/o path as the target of the link
lib_ver = getattr(target[0].attributes, 'shlibname', target[0].name)
# make symlink of adjacent names in linknames
for count in range(len(linknames)):
linkname = linknames[count]
if count > 0:
try:
os.remove(lastlinkname)
except:
pass
os.symlink(os.path.basename(linkname),lastlinkname)
if Verbose:
print "VerShLib: made sym link of %s -> %s" % (lastlinkname,linkname)
lastlinkname = linkname
# finish chain of sym links with link to the actual library
if len(linknames)>0:
try:
os.remove(lastlinkname)
except:
pass
os.symlink(lib_ver,lastlinkname)
if Verbose:
print "VerShLib: made sym link of %s -> %s" % (linkname, lib_ver)
return result
# Fix http://scons.tigris.org/issues/show_bug.cgi?id=2903 :
# Ensure we still depend on SCons.Defaults.ShLinkAction command line which is $SHLINKCOM.
# This was tricky because we don't want changing LIBPATH to cause a rebuild, but
# changing other link args should. LIBPATH has $( ... $) around it but until this
# fix, when the varlist was added to the build sig those ignored parts weren't getting
# ignored.
ShLibAction = SCons.Action.Action(VersionedSharedLibrary, None, varlist=['SHLINKCOM'])
def createSharedLibBuilder(env):
"""This is a utility function that creates the SharedLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
shared_lib = env['BUILDERS']['SharedLibrary']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
ShLibAction ]
shared_lib = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = '$SHLIBPREFIX',
suffix = '$SHLIBSUFFIX',
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['SharedLibrary'] = shared_lib
return shared_lib
def createLoadableModuleBuilder(env):
"""This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = '$LDMODULEPREFIX',
suffix = '$LDMODULESUFFIX',
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module
def createObjBuilders(env):
"""This is a utility function that creates the StaticObject
and SharedObject Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (StaticObject, SharedObject)
"""
try:
static_obj = env['BUILDERS']['StaticObject']
except KeyError:
static_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$OBJPREFIX',
suffix = '$OBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['StaticObject'] = static_obj
env['BUILDERS']['Object'] = static_obj
try:
shared_obj = env['BUILDERS']['SharedObject']
except KeyError:
shared_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$SHOBJPREFIX',
suffix = '$SHOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['SharedObject'] = shared_obj
return (static_obj, shared_obj)
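# Illustrative sketch (not part of this module): a compiler Tool's generate()
# typically wires its action and emitter into the builders returned above,
# roughly as follows (CAction, ShCAction and the emitters are placeholders
# for whatever the particular tool defines):
#
#     static_obj, shared_obj = createObjBuilders(env)
#     for suffix in CSuffixes:
#         static_obj.add_action(suffix, CAction)
#         static_obj.add_emitter(suffix, StaticObjectEmitter)
#         shared_obj.add_action(suffix, ShCAction)
#         shared_obj.add_emitter(suffix, SharedObjectEmitter)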
def createCFileBuilders(env):
"""This is a utility function that creates the CFile/CXXFile
Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (CFile, CXXFile)
"""
try:
c_file = env['BUILDERS']['CFile']
except KeyError:
c_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CFILESUFFIX'})
env['BUILDERS']['CFile'] = c_file
env.SetDefault(CFILESUFFIX = '.c')
try:
cxx_file = env['BUILDERS']['CXXFile']
except KeyError:
cxx_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CXXFILESUFFIX'})
env['BUILDERS']['CXXFile'] = cxx_file
env.SetDefault(CXXFILESUFFIX = '.cc')
return (c_file, cxx_file)
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
try:
java_jar = env['BUILDERS']['Jar']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
java_jar = SCons.Builder.Builder(action = jar_com,
suffix = '$JARSUFFIX',
src_suffix = '$JAVACLASSSUFFIX',
src_builder = 'JavaClassFile',
source_factory = fs.Entry)
env['BUILDERS']['Jar'] = java_jar
return java_jar
def CreateJavaHBuilder(env):
try:
java_javah = env['BUILDERS']['JavaH']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
java_javah_com = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
java_javah = SCons.Builder.Builder(action = java_javah_com,
src_suffix = '$JAVACLASSSUFFIX',
target_factory = fs.Entry,
source_factory = fs.File,
src_builder = 'JavaClassFile')
env['BUILDERS']['JavaH'] = java_javah
return java_javah
def CreateJavaClassFileBuilder(env):
try:
java_class_file = env['BUILDERS']['JavaClassFile']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_file = SCons.Builder.Builder(action = javac_com,
emitter = {},
#suffix = '$JAVACLASSSUFFIX',
src_suffix = '$JAVASUFFIX',
src_builder = ['JavaFile'],
target_factory = fs.Entry,
source_factory = fs.File)
env['BUILDERS']['JavaClassFile'] = java_class_file
return java_class_file
def CreateJavaClassDirBuilder(env):
try:
java_class_dir = env['BUILDERS']['JavaClassDir']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_dir = SCons.Builder.Builder(action = javac_com,
emitter = {},
target_factory = fs.Dir,
source_factory = fs.Dir)
env['BUILDERS']['JavaClassDir'] = java_class_dir
return java_class_dir
def CreateJavaFileBuilder(env):
try:
java_file = env['BUILDERS']['JavaFile']
except KeyError:
java_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$JAVASUFFIX'})
env['BUILDERS']['JavaFile'] = java_file
env['JAVASUFFIX'] = '.java'
return java_file
class ToolInitializerMethod(object):
"""
This is added to a construction environment in place of a
method(s) normally called for a Builder (env.Object, env.StaticObject,
etc.). When called, it has its associated ToolInitializer
object search the specified list of tools and apply the first
one that exists to the construction environment. It then calls
whatever builder was (presumably) added to the construction
environment in place of this particular instance.
"""
def __init__(self, name, initializer):
"""
Note: we store the tool name as __name__ so it can be used by
the class that attaches this to a construction environment.
"""
self.__name__ = name
self.initializer = initializer
def get_builder(self, env):
"""
Returns the appropriate real Builder for this method name
after having the associated ToolInitializer object apply
the appropriate Tool module.
"""
builder = getattr(env, self.__name__)
self.initializer.apply_tools(env)
builder = getattr(env, self.__name__)
if builder is self:
# There was no Builder added, which means no valid Tool
# for this name was found (or possibly there's a mismatch
# between the name we were called by and the Builder name
# added by the Tool module).
return None
self.initializer.remove_methods(env)
return builder
def __call__(self, env, *args, **kw):
"""
"""
builder = self.get_builder(env)
if builder is None:
return [], []
return builder(*args, **kw)
class ToolInitializer(object):
"""
A class for delayed initialization of Tools modules.
Instances of this class associate a list of Tool modules with
a list of Builder method names that will be added by those Tool
modules. As part of instantiating this object for a particular
construction environment, we also add the appropriate
ToolInitializerMethod objects for the various Builder methods
that we want to use to delay Tool searches until necessary.
"""
def __init__(self, env, tools, names):
if not SCons.Util.is_List(tools):
tools = [tools]
if not SCons.Util.is_List(names):
names = [names]
self.env = env
self.tools = tools
self.names = names
self.methods = {}
for name in names:
method = ToolInitializerMethod(name, self)
self.methods[name] = method
env.AddMethod(method)
def remove_methods(self, env):
"""
Removes the methods that were added by the tool initialization
so we no longer copy and re-bind them when the construction
environment gets cloned.
"""
for method in self.methods.values():
env.RemoveMethod(method)
def apply_tools(self, env):
"""
Searches the list of associated Tool modules for one that
exists, and applies that to the construction environment.
"""
for t in self.tools:
tool = SCons.Tool.Tool(t)
if tool.exists(env):
env.Tool(tool)
return
# If we fall through here, there was no tool module found.
# This is where we can put an informative error message
# about the inability to find the tool. We'll start doing
# this as we cut over more pre-defined Builder+Tools to use
# the ToolInitializer class.
def Initializers(env):
ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs', '_InternalInstallVersionedLib'])
def Install(self, *args, **kw):
return self._InternalInstall(*args, **kw)
def InstallAs(self, *args, **kw):
return self._InternalInstallAs(*args, **kw)
def InstallVersionedLib(self, *args, **kw):
return self._InternalInstallVersionedLib(*args, **kw)
env.AddMethod(Install)
env.AddMethod(InstallAs)
env.AddMethod(InstallVersionedLib)
def FindTool(tools, env):
for tool in tools:
t = Tool(tool)
if t.exists(env):
return tool
return None
def FindAllTools(tools, env):
def ToolExists(tool, env=env):
return Tool(tool).exists(env)
return list(filter (ToolExists, tools))
def tool_list(platform, env):
other_plat_tools=[]
# XXX this logic about what tool to prefer on which platform
# should be moved into either the platform files or
# the tool files themselves.
# The search orders here are described in the man page. If you
# change these search orders, update the man page as well.
if str(platform) == 'win32':
"prefer Microsoft tools on Windows"
linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32' ]
cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'c++', 'bcc32' ]
assemblers = ['masm', 'nasm', 'gas', '386asm' ]
fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
ars = ['mslib', 'ar', 'tlib']
other_plat_tools = ['msvs', 'midl']
elif str(platform) == 'os2':
"prefer IBM tools on OS/2"
linkers = ['ilink', 'gnulink', ]#'mslink']
c_compilers = ['icc', 'gcc',]# 'msvc', 'cc']
cxx_compilers = ['icc', 'g++',]# 'msvc', 'c++']
assemblers = ['nasm',]# 'masm', 'gas']
fortran_compilers = ['ifl', 'g77']
ars = ['ar',]# 'mslib']
elif str(platform) == 'irix':
"prefer MIPSPro on IRIX"
linkers = ['sgilink', 'gnulink']
c_compilers = ['sgicc', 'gcc', 'cc']
cxx_compilers = ['sgic++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['sgiar']
elif str(platform) == 'sunos':
"prefer Forte tools on SunOS"
linkers = ['sunlink', 'gnulink']
c_compilers = ['suncc', 'gcc', 'cc']
cxx_compilers = ['sunc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
'gfortran', 'g77', 'fortran']
ars = ['sunar']
elif str(platform) == 'hpux':
"prefer aCC tools on HP-UX"
linkers = ['hplink', 'gnulink']
c_compilers = ['hpcc', 'gcc', 'cc']
cxx_compilers = ['hpc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'aix':
"prefer AIX Visual Age tools on AIX"
linkers = ['aixlink', 'gnulink']
c_compilers = ['aixcc', 'gcc', 'cc']
cxx_compilers = ['aixc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'darwin':
"prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
linkers = ['applelink', 'gnulink']
c_compilers = ['gcc', 'cc']
cxx_compilers = ['g++', 'c++']
assemblers = ['as']
fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
ars = ['ar']
elif str(platform) == 'cygwin':
"prefer GNU tools on Cygwin, except for a platform-specific linker"
linkers = ['cyglink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
else:
"prefer GNU tools on all other platforms"
linkers = ['gnulink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
if not str(platform) == 'win32':
other_plat_tools += ['m4', 'rpm']
c_compiler = FindTool(c_compilers, env) or c_compilers[0]
# XXX this logic about what tool provides what should somehow be
# moved into the tool files themselves.
if c_compiler and c_compiler == 'mingw':
# MinGW contains a linker, C compiler, C++ compiler,
# Fortran compiler, archiver and assembler:
cxx_compiler = None
linker = None
assembler = None
fortran_compiler = None
ar = None
else:
# Don't use g++ if the C compiler has built-in C++ support:
if c_compiler in ('msvc', 'intelc', 'icc'):
cxx_compiler = None
else:
cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
linker = FindTool(linkers, env) or linkers[0]
assembler = FindTool(assemblers, env) or assemblers[0]
fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
ar = FindTool(ars, env) or ars[0]
d_compilers = ['dmd', 'gdc', 'ldc']
d_compiler = FindTool(d_compilers, env) or d_compilers[0]
other_tools = FindAllTools(other_plat_tools + [
#TODO: merge 'install' into 'filesystem' and
# make 'filesystem' the default
'filesystem',
'wix', #'midl', 'msvs',
# Parser generators
'lex', 'yacc',
# Foreign function interface
'rpcgen', 'swig',
# Java
'jar', 'javac', 'javah', 'rmic',
# TeX
'dvipdf', 'dvips', 'gs',
'tex', 'latex', 'pdflatex', 'pdftex',
# Archivers
'tar', 'zip',
# SourceCode factories
'BitKeeper', 'CVS', 'Perforce',
'RCS', 'SCCS', # 'Subversion',
], env)
tools = ([linker, c_compiler, cxx_compiler,
fortran_compiler, assembler, ar, d_compiler]
+ other_tools)
return [x for x in tools if x]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
from __future__ import unicode_literals
from django.db import models
from django.forms import ModelForm
from django.conf.global_settings import LANGUAGES
from django.contrib.auth.models import User
class CreatedUpdatedModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Skill( models.Model ):
user = models.ForeignKey(User)
name = models.CharField(max_length=4096)
def myToObj ( self ):
return { "id" : self.id , "name": self.name }
def fill ( self, data ):
self.name = data["name"]
def __str__( self ) :
return self.name
class PieceCategory( models.Model ):
user = models.ForeignKey(User)
name = models.CharField(max_length=4096, default="")
description = models.CharField(max_length=4096, default="")
language = models.CharField(max_length=7, choices=LANGUAGES, default="en")
tags = models.CharField(max_length=4096, default="")
pieces = models.TextField(default="")
def myToObj ( self ):
data = { "id" : self.id , "name": self.name, "description" : self.description }
data["language"] = self.language
data["tags"] = self.tags
data["pieces"] = self.pieces
return data
def fill ( self, data ):
self.name = data["name"]
self.description = data["description"]
self.tags = data["tags"]
self.language = data["language"]
self.pieces = data["pieces"]
def __str__( self ) :
return self.name
class Piece( models.Model ):
user = models.ForeignKey(User)
content = models.TextField()
language = models.CharField(max_length=7, choices=LANGUAGES)
tags = models.CharField(max_length=4096, default="")
legend = models.CharField(max_length=9064, default="")
def myToObj ( self ):
return { "id" : self.id , "content" : self.content, "language" : self.language, "tags" : self.tags, "legend" : self.legend }
def fill( self, data ) :
self.content = data["content"]
self.language = data["language"]
self.tags = data["tags"]
self.legend = data["legend"]
def __str__( self ) :
return self.content
class Cover( CreatedUpdatedModel ):
user = models.ForeignKey(User)
name = models.CharField(max_length=4096)
content = models.TextField(default="")
def myToObj ( self ):
return { "id" : self.id, "name" : self.name, "content" : self.content }
def fill( self, data ):
self.name = data["name"]
self.content = data["content"]
def __str__( self ) :
return self.name
class Application( CreatedUpdatedModel ):
user = models.ForeignKey(User)
portal = models.CharField(blank=True, max_length=200)
portal_link = models.CharField(blank=True, max_length=200, default="")
company = models.CharField(blank=True, max_length=200)
company_link = models.CharField(blank=True, max_length=200, default="")
position = models.CharField(blank=True, max_length=300)
position_link = models.CharField(blank=True, max_length=300, default="")
salary = models.CharField(blank=True, max_length=100)
contract = models.CharField(blank=True, max_length=300)
latitude = models.CharField(blank=True, max_length=20)
longitude = models.CharField(blank=True, max_length=20)
skills = models.CharField(blank=True, max_length=200)
written = models.BooleanField(default=False)
called = models.BooleanField(default=False)
interviewed = models.BooleanField(default=False)
followup = models.BooleanField(default=False)
notes = models.TextField(blank=True)
next = models.TextField(blank=True)
cover = models.TextField(blank=True)
address1 = models.CharField(blank=True, max_length=100)
address2 = models.CharField(blank=True, max_length=100)
c1name = models.CharField(blank=True, max_length=40)
c1mail = models.CharField(blank=True, max_length=40)
c1phone = models.CharField(blank=True, max_length=20)
c2name = models.CharField(blank=True, max_length=40)
c2mail = models.CharField(blank=True, max_length=40)
c2phone = models.CharField(blank=True, max_length=20)
c3name = models.CharField(blank=True, max_length=40)
c3mail = models.CharField(blank=True, max_length=40)
c3phone = models.CharField(blank=True, max_length=20)
c4name = models.CharField(blank=True, max_length=40)
c4mail = models.CharField(blank=True, max_length=40)
c4phone = models.CharField(blank=True, max_length=20)
def myToObj ( self ):
data = { "id" : self.id, "created" : self.created.strftime('%Y-%m-%d %H:%M') , "updated" : self.updated.strftime('%Y-%m-%d %H:%M') }
data["portal"] = self.portal
data["company"] = self.company
data["position"] = self.position
data["portal_link"] = self.portal_link
data["company_link"] = self.company_link
data["position_link"] = self.position_link
data["salary"] = self.salary
data["contract"] = self.contract
data["latitude"] = self.latitude
data["longitude"] = self.longitude
data["skills"] = self.skills
data["written"] = self.written
data["called"] = self.called
data["interviewed"] = self.interviewed
data["followup"] = self.followup
data["notes"] = self.notes
data["next"] = self.next
data["cover"] = self.cover
data["address1"] = self.address1
data["address2"] = self.address2
data["c1name"] = self.c1name
data["c1mail"] = self.c1mail
data["c1phone"] = self.c1phone
data["c2name"] = self.c2name
data["c2mail"] = self.c2mail
data["c2phone"] = self.c2phone
data["c3name"] = self.c3name
data["c3mail"] = self.c3mail
data["c3phone"] = self.c3phone
data["c4name"] = self.c4name
data["c4mail"] = self.c4mail
data["c4phone"] = self.c4phone
return data
def fill( self, data ) :
self.company = data["company"]
self.portal = data["portal"]
self.position = data["position"]
self.company_link = data["company_link"]
self.portal_link = data["portal_link"]
self.position_link = data["position_link"]
self.salary = data["salary"]
self.contract = data["contract"]
self.latitude = data["latitude"]
self.longitude = data["longitude"]
self.skills = data["skills"]
self.written = data["written"]
self.called = data["called"]
self.interviewed = data["interviewed"]
self.followup = data["followup"]
self.notes = data["notes"]
self.next = data["next"]
self.cover = data["cover"]
self.address1 = data["address1"]
self.address2 = data["address2"]
self.c1name = data["c1name"]
self.c1mail = data["c1mail"]
self.c1phone = data["c1phone"]
self.c2name = data["c2name"]
self.c2mail = data["c2mail"]
self.c2phone = data["c2phone"]
self.c3name = data["c3name"]
self.c3mail = data["c3mail"]
self.c3phone = data["c3phone"]
self.c4name = data["c4name"]
self.c4mail = data["c4mail"]
self.c4phone = data["c4phone"]
def __str__( self ) :
return self.company
# User
class Profile( CreatedUpdatedModel ):
user = models.OneToOneField(User)
uuid = models.UUIDField()
bio = models.TextField()
website = models.URLField(null=True)
has_avatar = models.BooleanField(default=False)
avatar = models.CharField(max_length=4096)
tutorial = models.IntegerField()
def myToObj ( self ):
data = {}
data["user"] = { "id" : self.user.id, "name" : self.user.name }
data["uuid"] = self.uuid
data["bio"] = self.bio
data["website"] = self.website
data["has_avatar"] = self.has_avatar
data["avatar"] = self.avatar
data["tutorial"] = self.tutorial
return data
    def __str__( self ) :
        return str(self.user)
class ProfileForm(ModelForm):
class Meta:
model = Profile
fields = ['user', 'bio', 'website']
class SkillForm(ModelForm):
class Meta:
model = Skill
fields = ['name']
class ApplicationForm(ModelForm):
class Meta:
model = Application
fields = ['company','portal', 'position','skills', 'written', 'called', 'interviewed', 'followup', 'notes', 'next', 'cover', 'address1', 'address2', 'c1name', 'c1mail', 'c1phone', 'c2name', 'c2mail', 'c2phone','c3name', 'c3mail', 'c3phone','c4name', 'c4mail', 'c4phone']
class PieceCategoryForm(ModelForm):
class Meta:
model = PieceCategory
fields = ['name', 'description', 'tags', 'language', 'pieces']
class PieceForm(ModelForm):
class Meta:
model = Piece
fields = ['language', 'tags', 'legend', 'content']
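# Illustrative sketch: one way the myToObj() helper above might back a small
# JSON endpoint. The view name and the JsonResponse import are assumptions made
# for this example, not something defined elsewhere in this file.
from django.http import JsonResponse

def application_detail(request, pk):
    # Serialize a single Application owned by the requesting user.
    app = Application.objects.get(pk=pk, user=request.user)
    return JsonResponse(app.myToObj())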
|
|
from collections import defaultdict, Counter
import sys
DEBUG = False
def log(*arg):
"""
Logging function for debugging.
"""
if not DEBUG:
return
# print "DEBUG:",
for i in range(len(arg)):
print arg[i],
print
class Stack(object):
def __init__(self):
self.s = []
self.size = 0
def __str__(self):
return self.s.__str__()
def push(self, e):
self.s.append(e)
self.size += 1
def pop(self):
if self.empty():
return None
self.size -= 1
return self.s.pop()
def top(self):
if self.empty():
return None
return self.s[self.size-1]
def empty(self):
return self.size == 0
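# Minimal LIFO wrapper around a list; top() and empty() are what the iterative
# DFS below relies on to sidestep Python's recursion limit on large graphs.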
"""
Graph class
"""
class Graph(object):
def __init__(self):
self.edges = defaultdict(list)
self.vertices = set()
def add_vertex(self, v):
"""
Add vertex to graph.
"""
self.vertices.add(v)
def add_edge(self, s, t):
"""
Add edge to graph.
"""
self.edges[s].append(t)
def get_edges(self, v):
"""
Get edges coming from vertex v.
"""
return self.edges[v]
def sort_edges(self):
"""
Sort neighbor vertices in edges.
"""
for v in self.vertices:
self.edges[v].sort()
def get_reverse(self):
"""
Get graph with all arcs reversed.
"""
graph = Graph()
for v in self.vertices:
graph.add_vertex(v)
for w in self.edges[v]:
graph.add_vertex(w)
graph.add_edge(w, v)
return graph
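# Reversing all arcs and then running two DFS passes (first over the reversed
# graph, then over the original in decreasing finishing time) is Kosaraju's
# SCC algorithm, which the SCC class below implements.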
"""
Strongly Connected Components class
"""
class SCC(object):
def __init__(self):
self.init()
def init(self):
"""
Initialize common structures.
"""
self.curr_time = 0
self.curr_leader = 0
self.leader = {}
self.explored = {}
self.fin_time = []
def DFS_loop(self, graph, vertices):
"""
DFS start function.
"""
log("vertices", vertices)
num_v = len(vertices)
self.init()
for v in vertices:
if v in self.explored and self.explored[v]:
continue
log("+ DFS_loop", v)
self.curr_leader = v
self.DFS(graph, v)
log(" explored", self.explored)
log(" leader", self.leader)
log(" fin_time", self.fin_time)
def DFS_it(self, graph, v):
"""
        Iterative DFS.
        Uses an explicit stack to avoid exceeding the maximum recursion depth.
"""
stack = Stack()
stack.push(v)
finished = Stack()
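        # `stack` drives the traversal; `finished` records vertices whose
        # subtrees are done, so finishing times come out in post-order.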
while not stack.empty():
v = stack.top()
if not v in self.explored or not self.explored[v]:
log(" - DFS", v) #, stack)
self.explored[v] = True
self.leader[v] = self.curr_leader
edges = graph.get_edges(v)[:]
log(" edges:", edges)
edges.reverse()
for w in edges:
if w in self.explored and self.explored[w]:
continue
stack.push(w)
finished.push(v)
else:
if v == finished.top():
self.fin_time.append(finished.pop())
stack.pop()
def DFS_rec(self, graph, v):
"""
DFS recursive function.
"""
log(" - DFS", v)
self.explored[v] = True
self.leader[v] = self.curr_leader
edges = graph.get_edges(v)
log(" edges:", edges)
for w in edges:
if w in self.explored and self.explored[w]:
continue
self.DFS(graph, w)
self.fin_time.append(v)
def SCC(self, graph):
"""
Main function to obtain the strongly connected components of a graph.
"""
g_rev = graph.get_reverse()
log("graph.edges", graph.edges.items())
log("g_rev.edges", g_rev.edges.items())
log("")
        # Compute the magical ordering of nodes (finishing times on the reversed graph)
vertices = list(g_rev.vertices)
vertices.reverse()
self.DFS_loop(g_rev, vertices)
log("")
# Discover the SCCs one by one
vertices = self.fin_time
vertices.reverse()
self.DFS_loop(graph, vertices)
# Get SCCs
self.get_SCCs()
def get_SCCs(self):
"""
"""
self.scc_counter = Counter()
for _, v in self.leader.items():
self.scc_counter[v] += 1
def most_common(self):
"""
        Gets the (leader, size) pairs of the strongly connected components, biggest first.
"""
return self.scc_counter.most_common()
def sol_format(values, n):
"""
Formats the size of the n biggest SCCs using the format "A,B,C".
If the number of values is less than n, then the value 0 is used.
"""
res = [0]*n
# Get first n values
for i in range(min(len(values), n)):
res[i] = values[i][1]
# Format to string
s = str(res[0])
for i in range(1, n):
s = s + ",%d" % (res[i])
return s
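# Example (illustrative): sol_format([(5, 400), (7, 30), (9, 4)], 5) -> "400,30,4,0,0"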
if __name__ == "__main__":
filename = "SCC.txt"
if len(sys.argv) > 1:
filename = sys.argv[1]
graph = Graph()
with open(filename) as f:
for l in f:
v, w = l.split()
v, w = int(v, 10), int(w, 10)
graph.add_vertex(v)
graph.add_vertex(w)
graph.add_edge(v, w)
graph.sort_edges()
scc = SCC()
# use iterative DFS
scc.DFS = scc.DFS_it
# calculate SCCs
scc.SCC(graph)
print sol_format(scc.most_common(), 5)
|
|
"""Binary sensor platform for hvv_departures."""
from datetime import timedelta
import logging
from aiohttp import ClientConnectorError
import async_timeout
from pygti.exceptions import InvalidAuth
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import ATTRIBUTION, CONF_STATION, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the binary_sensor platform."""
hub = hass.data[DOMAIN][entry.entry_id]
station_name = entry.data[CONF_STATION]["name"]
station = entry.data[CONF_STATION]
def get_elevator_entities_from_station_information(
station_name, station_information
):
"""Convert station information into a list of elevators."""
elevators = {}
if station_information is None:
return {}
for partial_station in station_information.get("partialStations", []):
for elevator in partial_station.get("elevators", []):
state = elevator.get("state") != "READY"
available = elevator.get("state") != "UNKNOWN"
label = elevator.get("label")
description = elevator.get("description")
if label is not None:
name = f"Elevator {label} at {station_name}"
else:
name = f"Unknown elevator at {station_name}"
if description is not None:
name += f" ({description})"
lines = elevator.get("lines")
idx = f"{station_name}-{label}-{lines}"
elevators[idx] = {
"state": state,
"name": name,
"available": available,
"attributes": {
"cabin_width": elevator.get("cabinWidth"),
"cabin_length": elevator.get("cabinLength"),
"door_width": elevator.get("doorWidth"),
"elevator_type": elevator.get("elevatorType"),
"button_type": elevator.get("buttonType"),
"cause": elevator.get("cause"),
"lines": lines,
ATTR_ATTRIBUTION: ATTRIBUTION,
},
}
return elevators
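    # Shape of the mapping built above (keys and values are illustrative):
    # {"Hauptbahnhof-A1-['U1']": {"state": True, "name": "Elevator A1 at Hauptbahnhof (...)",
    #                             "available": True, "attributes": {...}}}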
async def async_update_data():
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
payload = {"station": station}
try:
async with async_timeout.timeout(10):
return get_elevator_entities_from_station_information(
station_name, await hub.gti.stationInformation(payload)
)
except InvalidAuth as err:
raise UpdateFailed(f"Authentication failed: {err}") from err
except ClientConnectorError as err:
raise UpdateFailed(f"Network not available: {err}") from err
except Exception as err:
raise UpdateFailed(f"Error occurred while fetching data: {err}") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="hvv_departures.binary_sensor",
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(hours=1),
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
async_add_entities(
HvvDepartureBinarySensor(coordinator, idx, entry)
for (idx, ent) in coordinator.data.items()
)
class HvvDepartureBinarySensor(CoordinatorEntity, BinarySensorEntity):
"""HVVDepartureBinarySensor class."""
def __init__(self, coordinator, idx, config_entry):
"""Initialize."""
super().__init__(coordinator)
self.coordinator = coordinator
self.idx = idx
self.config_entry = config_entry
@property
def is_on(self):
"""Return entity state."""
return self.coordinator.data[self.idx]["state"]
@property
def should_poll(self):
"""No need to poll. Coordinator notifies entity of updates."""
return False
@property
def available(self):
"""Return if entity is available."""
return (
self.coordinator.last_update_success
and self.coordinator.data[self.idx]["available"]
)
@property
def device_info(self):
"""Return the device info for this sensor."""
return DeviceInfo(
identifiers={
(
DOMAIN,
self.config_entry.entry_id,
self.config_entry.data[CONF_STATION]["id"],
self.config_entry.data[CONF_STATION]["type"],
)
},
manufacturer=MANUFACTURER,
name=f"Departures at {self.config_entry.data[CONF_STATION]['name']}",
)
@property
def name(self):
"""Return the name of the sensor."""
return self.coordinator.data[self.idx]["name"]
@property
def unique_id(self):
"""Return a unique ID to use for this sensor."""
return self.idx
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return BinarySensorDeviceClass.PROBLEM
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if not (
self.coordinator.last_update_success
and self.coordinator.data[self.idx]["available"]
):
return None
return {
k: v
for k, v in self.coordinator.data[self.idx]["attributes"].items()
if v is not None
}
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self.coordinator.async_request_refresh()
|
|
from __future__ import unicode_literals, division, absolute_import
import os
import shutil
import logging
import time
from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
def get_directory_size(directory):
"""
:param directory: Path
:return: Size in bytes (recursively)
"""
dir_size = 0
for (path, dirs, files) in os.walk(directory):
for file in files:
filename = os.path.join(path, file)
dir_size += os.path.getsize(filename)
return dir_size
class BaseFileOps(object):
# Defined by subclasses
log = None
def on_task_output(self, task, config):
if config is True:
config = {}
elif config is False:
return
sexts = []
if 'along' in config:
sexts = [('.' + s).replace('..', '.').lower() for s in config['along']]
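            # e.g. along: ['srt', '.SUB'] -> sexts == ['.srt', '.sub']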
for entry in task.accepted:
if 'location' not in entry:
self.log.verbose('Cannot handle %s because it does not have the field location.' % entry['title'])
continue
src = entry['location']
src_isdir = os.path.isdir(src)
try:
# check location
if not os.path.exists(src):
                    raise plugin.PluginWarning('location `%s` does not exist (anymore).' % src)
if src_isdir:
if not config.get('allow_dir'):
raise plugin.PluginWarning('location `%s` is a directory.' % src)
elif not os.path.isfile(src):
raise plugin.PluginWarning('location `%s` is not a file.' % src)
# search for namesakes
siblings = []
if not src_isdir and 'along' in config:
src_file, src_ext = os.path.splitext(src)
for ext in sexts:
if ext != src_ext.lower() and os.path.exists(src_file + ext):
siblings.append(src_file + ext)
# execute action in subclasses
self.handle_entry(task, config, entry, siblings)
except Exception as err:
entry.fail(str(err))
continue
def clean_source(self, task, config, entry):
min_size = entry.get('clean_source', config.get('clean_source', -1))
if min_size < 0:
return
base_path = os.path.split(entry.get('old_location', entry['location']))[0]
        # everything here happens after a successful execution of the main action: the entry has been
        # moved to a different location, or it no longer exists, so from here we can just log warnings and move on.
if not os.path.isdir(base_path):
            self.log.warning('Cannot delete path `%s` because it does not exist (anymore).' % base_path)
return
dir_size = get_directory_size(base_path) / 1024 / 1024
if dir_size >= min_size:
            self.log.info('Path `%s` left untouched because its size exceeds the clean_source safety value.' % base_path)
return
if task.options.test:
self.log.info('Would delete `%s` and everything under it.' % base_path)
return
try:
shutil.rmtree(base_path)
            self.log.info('Path `%s` has been deleted because it was smaller than the clean_source safe value.' % base_path)
except Exception as err:
self.log.warning('Unable to delete path `%s`: %s' % (base_path, err))
class DeleteFiles(BaseFileOps):
"""Delete all accepted files."""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'allow_dir': {'type': 'boolean'},
'along': {'type': 'array', 'items': {'type': 'string'}},
'clean_source': {'type': 'number'}
},
'additionalProperties': False
}
]
}
log = logging.getLogger('delete')
def handle_entry(self, task, config, entry, siblings):
src = entry['location']
src_isdir = os.path.isdir(src)
if task.options.test:
if src_isdir:
self.log.info('Would delete `%s` and all its content.' % src)
else:
self.log.info('Would delete `%s`' % src)
for s in siblings:
self.log.info('Would also delete `%s`' % s)
return
# IO errors will have the entry mark failed in the base class
if src_isdir:
shutil.rmtree(src)
self.log.info('`%s` and all its content has been deleted.' % src)
else:
os.remove(src)
self.log.info('`%s` has been deleted.' % src)
        # further errors will not have any effect (the entry no longer exists)
for s in siblings:
try:
os.remove(s)
self.log.info('`%s` has been deleted as well.' % s)
except Exception as err:
self.log.warning(str(err))
if not src_isdir:
self.clean_source(task, config, entry)
class TransformingOps(BaseFileOps):
# Defined by subclasses
move = None
destination_field = None
def handle_entry(self, task, config, entry, siblings):
src = entry['location']
src_isdir = os.path.isdir(src)
src_path, src_name = os.path.split(src)
# get the proper path and name in order of: entry, config, above split
dst_path = entry.get(self.destination_field, config.get('to', src_path))
if entry.get('filename') and entry['filename'] != src_name:
# entry specifies different filename than what was split from the path
# since some inputs fill in filename it must be different in order to be used
dst_name = entry['filename']
else:
dst_name = config.get('filename', src_name)
try:
dst_path = entry.render(dst_path)
except RenderError as err:
raise plugin.PluginWarning('Path value replacement `%s` failed: %s' % (dst_path, err.args[0]))
try:
dst_name = entry.render(dst_name)
except RenderError as err:
raise plugin.PluginWarning('Filename value replacement `%s` failed: %s' % (dst_name, err.args[0]))
# Clean invalid characters with pathscrub plugin
dst_path = pathscrub(os.path.expanduser(dst_path))
dst_name = pathscrub(dst_name, filename=True)
# Join path and filename
dst = os.path.join(dst_path, dst_name)
if dst == entry['location']:
raise plugin.PluginWarning('source and destination are the same.')
if not os.path.exists(dst_path):
if task.options.test:
self.log.info('Would create `%s`' % dst_path)
else:
self.log.info('Creating destination directory `%s`' % dst_path)
os.makedirs(dst_path)
if not os.path.isdir(dst_path) and not task.options.test:
raise plugin.PluginWarning('destination `%s` is not a directory.' % dst_path)
# unpack_safety
if config.get('unpack_safety', entry.get('unpack_safety', True)):
count = 0
while True:
if count > 60 * 30:
                    raise plugin.PluginWarning('The task has been waiting for unpacking for 30 minutes')
size = os.path.getsize(src)
time.sleep(1)
new_size = os.path.getsize(src)
if size != new_size:
if not count % 10:
self.log.verbose('File `%s` is possibly being unpacked, waiting ...' % src_name)
else:
break
count += 1
src_file, src_ext = os.path.splitext(src)
dst_file, dst_ext = os.path.splitext(dst)
# Check dst contains src_ext
if config.get('keep_extension', entry.get('keep_extension', True)):
if not src_isdir and dst_ext != src_ext:
self.log.verbose('Adding extension `%s` to dst `%s`' % (src_ext, dst))
dst += src_ext
funct_name = 'move' if self.move else 'copy'
funct_done = 'moved' if self.move else 'copied'
if task.options.test:
self.log.info('Would %s `%s` to `%s`' % (funct_name, src, dst))
for s in siblings:
# we cannot rely on splitext for extensions here (subtitles may have the language code)
d = dst_file + s[len(src_file):]
self.log.info('Would also %s `%s` to `%s`' % (funct_name, s, d))
else:
# IO errors will have the entry mark failed in the base class
if self.move:
shutil.move(src, dst)
elif src_isdir:
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
self.log.info('`%s` has been %s to `%s`' % (src, funct_done, dst))
# further errors will not have any effect (the entry has been successfully moved or copied out)
for s in siblings:
# we cannot rely on splitext for extensions here (subtitles may have the language code)
d = dst_file + s[len(src_file):]
try:
if self.move:
shutil.move(s, d)
else:
shutil.copy(s, d)
self.log.info('`%s` has been %s to `%s` as well.' % (s, funct_done, d))
except Exception as err:
self.log.warning(str(err))
entry['old_location'] = src
entry['location'] = dst
entry['output'] = dst
if self.move and not src_isdir:
self.clean_source(task, config, entry)
class CopyFiles(TransformingOps):
"""Copy all accepted files."""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'filename': {'type': 'string'},
'allow_dir': {'type': 'boolean'},
'unpack_safety': {'type': 'boolean'},
'keep_extension': {'type': 'boolean'},
'along': {'type': 'array', 'items': {'type': 'string'}}
},
'additionalProperties': False
}
]
}
move = False
destination_field = 'copy_to'
log = logging.getLogger('copy')
class MoveFiles(TransformingOps):
"""Move all accepted files."""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'filename': {'type': 'string'},
'allow_dir': {'type': 'boolean'},
'unpack_safety': {'type': 'boolean'},
'keep_extension': {'type': 'boolean'},
'along': {'type': 'array', 'items': {'type': 'string'}},
'clean_source': {'type': 'number'}
},
'additionalProperties': False
}
]
}
move = True
destination_field = 'move_to'
log = logging.getLogger('move')
@event('plugin.register')
def register_plugin():
plugin.register(DeleteFiles, 'delete', api_ver=2)
plugin.register(CopyFiles, 'copy', api_ver=2)
plugin.register(MoveFiles, 'move', api_ver=2)
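# A minimal FlexGet task configuration for these plugins might look like the
# sketch below (YAML, shown as a comment; the destination path and template
# field are made-up examples, not defaults):
#
#   tasks:
#     tidy-downloads:
#       move:
#         to: '/data/complete/{{title}}'
#         along: [srt, sub]
#         keep_extension: yes
#         clean_source: 50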
|
|
# Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from mock import call, Mock, patch, sentinel
from dirigible.test_utils import die, ResolverTestCase
from sheet.cell import Cell
from sheet.dependency_graph import (
_add_location_dependencies, build_dependency_graph,
_generate_cell_subgraph, Node)
from sheet.errors import (
CycleError, report_cell_error,
)
from sheet.worksheet import Worksheet
class TestBuildDependencyGraph(ResolverTestCase):
def test_returns_graph_and_leaf_nodes(self):
worksheet = Worksheet()
worksheet[1, 1].formula = '=A2 + B2'
worksheet[1, 2].formula = '=A3'
worksheet[2, 2].formula = '=B3'
worksheet[1, 3].formula = '=1'
worksheet[2, 3].formula = '1'
worksheet[3, 3].python_formula = '1'
graph, leaves = build_dependency_graph(worksheet)
self.maxDiff = None
self.assertEquals(
graph,
{
(1, 1): Node(
(1, 1),
children=set([(1, 2), (2, 2)]),
parents=set()
),
(1, 2): Node(
(1, 2),
children=set([(1, 3)]),
parents=set([(1, 1)])
),
(2, 2): Node(
(2, 2),
children=set(),
parents=set([(1, 1)])
),
(1, 3): Node(
(1, 3),
children=set(),
parents=set([(1, 2)])
),
(3, 3): Node(
(3, 3),
children=set(),
parents=set()
),
}
)
self.assertEquals(set(leaves), set([(1, 3), (2, 2), (3, 3)]))
worksheet[1, 2].formula = '=A3 + B3'
graph, leaves = build_dependency_graph(worksheet)
self.assertEquals(graph, {
(1, 1): Node(
(1, 1),
children=set([(1, 2), (2, 2)]),
parents=set(),
),
(1, 2): Node(
(1, 2),
children=set([(1, 3)]),
parents=set([(1, 1)]),
),
(2, 2): Node(
(2, 2),
children=set(),
parents=set([(1, 1)])
),
(1, 3): Node(
(1, 3),
children=set(),
parents=set([(1, 2)])
),
(3, 3): Node(
(3, 3),
children=set(),
parents=set()
),
})
self.assertEquals(set(leaves), set([(1, 3), (2, 2), (3, 3)]))
def test_is_robust_against_references_to_empty_cells(self):
worksheet = Worksheet()
worksheet[1, 1].formula = '=A2'
# NB we're making sure that this call doesn't raise an error
# because the cell A2 is created in the dictionary while we're
# iterating over it.
graph, leaves = build_dependency_graph(worksheet)
self.maxDiff = None
self.assertEquals(
graph,
{
(1, 1): Node(
(1, 1),
children=set(),
parents=set()
)
}
)
self.assertEquals(leaves, [(1, 1)])
@patch('sheet.dependency_graph.report_cell_error')
def test_puts_errors_on_cells_in_cycles_and_omits_them_from_graph(self, mock_report_cell_error):
mock_report_cell_error.side_effect = report_cell_error
worksheet = Worksheet()
worksheet[1, 1].formula = '=A2'
worksheet[1, 2].formula = '=A1'
worksheet[1, 3].formula = '=A1'
worksheet[1, 4].formula = '=A5'
worksheet[1, 5].formula = '=5'
graph, leaves = build_dependency_graph(worksheet)
self.assertEquals(
graph,
{
(1, 3): Node((1, 3), children=set(), parents=set()),
(1, 4): Node((1, 4), children=set([(1, 5)]), parents=set()),
(1, 5): Node((1, 5), children=set(), parents=set([(1, 4)])),
}
)
self.assertEquals(leaves, [(1, 5), (1, 3)])
a1_cycle_error = CycleError([(1, 2), (1, 1), (1, 2)])
self.assertEquals(
mock_report_cell_error.call_args_list,
[
call(worksheet, (1, 2), a1_cycle_error),
call(worksheet, (1, 1), a1_cycle_error),
]
)
class TestGenerateCellSubgraph(ResolverTestCase):
@patch('sheet.dependency_graph._generate_cell_subgraph')
@patch('sheet.dependency_graph._add_location_dependencies')
def test_should_recursively_call_itself_on_dependencies_before_adding_dependencies_to_graph(
self, mock_add_location_dependencies, mock_generate_cell_subgraph
):
mock_generate_cell_subgraph.copied_call_args_list = []
def mock_recalc_recursive_call(worksheet, context, loc, visited, path):
self.assertFalse(mock_add_location_dependencies.called)
mock_generate_cell_subgraph.copied_call_args_list.append((worksheet, context, loc, set(visited), list(path)))
mock_generate_cell_subgraph.side_effect = mock_recalc_recursive_call
mock_generate_cell_subgraph_was_called_before_add_location_dependencies = []
def add_location_dependencies_side_effect(*_):
mock_generate_cell_subgraph_was_called_before_add_location_dependencies.append(mock_generate_cell_subgraph.called)
mock_add_location_dependencies.side_effect = add_location_dependencies_side_effect
worksheet = Worksheet()
worksheet[1, 11].formula = '=formula'
worksheet[1, 11].dependencies = [(2, 22), (3, 33)]
context = sentinel.context
_generate_cell_subgraph(worksheet, context, (1, 11), set(), [])
self.assertTrue(mock_add_location_dependencies.called)
self.assertTrue(mock_generate_cell_subgraph_was_called_before_add_location_dependencies[0])
self.assertItemsEqual(
mock_generate_cell_subgraph.copied_call_args_list,
[
(worksheet, context, (2, 22), set(), [(1, 11)]),
(worksheet, context, (3, 33), set(), [(1, 11)]),
]
)
@patch('sheet.dependency_graph._add_location_dependencies')
def test_should_add_dependencies_to_graph(
self, mock_add_location_dependencies
):
worksheet = Worksheet()
worksheet[99, 98].formula = '=foobar'
worksheet[1, 11].formula = '=foo'
worksheet[1, 11].dependencies = [(99, 98)]
graph = sentinel.graph
_generate_cell_subgraph(worksheet, graph, (1, 11), set(), [])
self.assertEqual(
mock_add_location_dependencies.call_args,
((graph, (1, 11), set([(99, 98)])), {}),
)
@patch('sheet.dependency_graph._add_location_dependencies')
def test_should_remove_dependencies_with_errors_and_empty_cells(
self, mock_add_location_dependencies
):
worksheet = Worksheet()
worksheet[1, 1].formula = '1'
worksheet[1, 2].error = CycleError([])
worksheet[1, 3].error = SyntaxError('')
worksheet[1, 11].formula = '=foo'
worksheet[1, 11].dependencies = [(1, 1), (1, 2), (1, 3), (1, 4)]
graph = sentinel.graph
_generate_cell_subgraph(worksheet, graph, (1, 11), set(), [])
self.assertCalledOnce(mock_add_location_dependencies,
graph, (1, 11), set())
@patch('sheet.dependency_graph._generate_cell_subgraph', die(CycleError([])))
@patch('sheet.dependency_graph._add_location_dependencies')
@patch('sheet.dependency_graph.report_cell_error')
def test_should_report_cell_error_and_not_add_location_on_recursive_call_raising_cycle_error_if_location_is_not_in_cycle_path(
self, mock_report_cell_error, mock_add_location_dependencies
):
worksheet = Worksheet()
worksheet[1, 11].formula = '=A12'
worksheet[1, 11].dependencies = [(1, 12)]
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 11), set(), [])
self.assertCalledOnce(mock_add_location_dependencies, sentinel.graph, (1, 11), set())
self.assertCalledOnce(mock_report_cell_error, worksheet, (1, 11), CycleError([]))
@patch('sheet.dependency_graph._add_location_dependencies')
def test_should_add_cell_to_graph_if_formula_not_set_but_python_formula_is(
self, mock_add_location_dependencies
):
worksheet = Worksheet()
worksheet[1, 2].python_formula = 'blerk'
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 2), set(), [])
self.assertCalledOnce(mock_add_location_dependencies, sentinel.graph, (1, 2), set())
@patch('sheet.dependency_graph._add_location_dependencies')
def test_should_not_reprocess_locations_already_in_visited_even_if_it_is_in_worksheet(
self, mock_add_location_dependencies
):
cell = Cell()
cell.formula = 'constant'
worksheet = Worksheet()
worksheet[1, 2] = cell
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 2), set([(1, 2)]), [])
self.assertFalse(mock_add_location_dependencies.called)
@patch('sheet.dependency_graph._generate_cell_subgraph')
@patch('sheet.dependency_graph._add_location_dependencies', Mock())
def test_should_add_location_to_visited_set_after_recursing_deps(
self, mock_generate_cell_subgraph
):
visited = set()
visited_set_at_time_of_recursive_call = []
# NB: Clone visited or changes will be reflected in the one we store
mock_generate_cell_subgraph.side_effect = lambda *_: visited_set_at_time_of_recursive_call.append(set(visited))
worksheet = Worksheet()
worksheet[1, 2].formula = '=23'
worksheet[1, 2].dependencies = [(3, 4)]
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 2), visited, [])
self.assertEquals(visited_set_at_time_of_recursive_call[0], set())
self.assertEquals(visited, set([(1, 2)]))
self.assertTrue(mock_generate_cell_subgraph.called)
def test_should_safely_handle_nonexistent_location(self):
empty_worksheet = {}
_generate_cell_subgraph(empty_worksheet, sentinel.graph, (1, 2), set(), [])
@patch('sheet.dependency_graph.report_cell_error')
def test_should_report_then_raise_cycle_error_when_there_is_a_cycle(
self, mock_report_cell_error
):
cycle_error = CycleError([(1, 2), (3, 4), (1, 2)])
worksheet = Worksheet()
worksheet[1, 2].formula = "=foo"
visited = set()
try:
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 2), visited, [(8, 9), (1, 2), (3, 4)])
except Exception, e:
self.assertEquals(e, cycle_error)
else:
self.fail("No Exception raised")
self.assertCalledOnce(mock_report_cell_error, worksheet, (1, 2), cycle_error)
self.assertEquals(visited, set([(1, 2)]))
def test_should_raise_any_existing_cycle_error_for_visited_locations(self):
cycle_error = CycleError([(1, 2), (3, 4), (1, 2)])
worksheet = Worksheet()
worksheet[1, 2].error = cycle_error
try:
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 2), set([(1, 2)]), sentinel.path)
except Exception, e:
self.assertEquals(e, cycle_error)
else:
self.fail("No Exception raised")
@patch('sheet.dependency_graph._generate_cell_subgraph')
@patch('sheet.dependency_graph.report_cell_error')
@patch('sheet.dependency_graph._add_location_dependencies', Mock())
def test_should_reraise_cycle_error_after_reporting_if_its_in_the_cycle_path(
self, mock_report_cell_error, mock_recursive_call
):
cycle_error = CycleError([(1, 2), (3, 4), (1, 2)])
worksheet = Worksheet()
worksheet[1, 2].formula = "=C4"
mock_recursive_call.side_effect = die(cycle_error)
visited = set()
try:
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 2), visited, [])
except Exception, e:
self.assertEquals(e, cycle_error)
else:
self.fail("No Exception raised")
self.assertCalledOnce(mock_report_cell_error, worksheet, (1, 2), cycle_error)
self.assertEquals(visited, set([(1, 2)]))
@patch('sheet.dependency_graph._generate_cell_subgraph')
@patch('sheet.dependency_graph.report_cell_error')
@patch('sheet.dependency_graph._add_location_dependencies', Mock())
def test_should_not_reraise_cycle_error_if_its_outside_the_cycle_path(
self, mock_report_cell_error, mock_recursive_call
):
cycle_error = CycleError([(1, 2), (3, 4), (1, 2)])
worksheet = Worksheet()
worksheet[1, 3].formula = "=foo"
mock_recursive_call.side_effect = die(cycle_error)
_generate_cell_subgraph(worksheet, sentinel.graph, (1, 3), set(), []) # should not raise
@patch('sheet.dependency_graph._generate_cell_subgraph')
def test_should_not_recurse_into_existing_cycle_errors_or_include_them_in_its_deps(
self, mock_recursive_call
):
cycle_error = CycleError([(1, 2), (3, 4), (1, 2)])
worksheet = Worksheet()
worksheet[1, 2].error = cycle_error
worksheet[3, 4].error = cycle_error
worksheet[1, 3].formula = "=foo"
worksheet[1, 3].dependencies = [(3, 4)]
visited = set([(1, 2), (3, 4)])
graph = {}
_generate_cell_subgraph(worksheet, graph, (1, 3), visited, [])
dep_cell_calls = [c[0][2] for c in mock_recursive_call.call_args_list]
        self.assertNotIn((3, 4), dep_cell_calls)
self.assertEquals(visited, set([(1, 2), (1, 3), (3, 4)]))
self.assertEquals(graph, {(1, 3): Node((1, 3), set())})
    def test_does_not_include_discovered_cycle_in_deps_of_current_cell(self):
worksheet = Worksheet()
worksheet[1, 1].formula = '=A2'
worksheet[1, 2].formula = '=A1'
worksheet[1, 3].formula = '=A1'
visited = set()
graph = {}
_generate_cell_subgraph(worksheet, graph, (1, 3), visited, [])
self.assertEquals(graph, {(1, 3): Node((1, 3), set())})
self.assertEquals(visited, set([(1, 2), (1, 3), (1, 1)]))
@patch('sheet.dependency_graph.report_cell_error')
def test_reports_error_once_per_cell(self, mock_report_cell_error):
mock_report_cell_error.side_effect = report_cell_error
worksheet = Worksheet()
worksheet[1, 1].formula = '=A2'
worksheet[1, 2].formula = '=A1'
try:
_generate_cell_subgraph(worksheet, {}, (1, 1), set(), [])
except CycleError:
pass
self.assertEquals(len(mock_report_cell_error.call_args_list), 2)
class TestDependencyGraphNode(ResolverTestCase):
def test_constructor(self):
self.assertRaises(TypeError, lambda: Node())
n1 = Node((1, 2))
self.assertEquals(n1.location, (1, 2))
self.assertEquals(n1.children, set())
self.assertEquals(n1.parents, set())
n2 = Node((2, 3), children=set([1, 2, 3]))
self.assertEquals(n2.location, (2, 3))
self.assertEquals(n2.children, set([1, 2, 3]))
self.assertEquals(n2.parents, set())
n3 = Node((4, 5), parents=set([1, 2, 3]))
self.assertEquals(n3.location, (4, 5))
self.assertEquals(n3.children, set())
self.assertEquals(n3.parents, set([1, 2, 3]))
def test_nodes_should_have_a_lock(self):
node = Node((1, 2))
self.assertIsNotNone(node.lock.acquire)
self.assertIsNotNone(node.lock.release)
def test_equality(self):
n1 = Node((1, 2), children=set([1]))
n1.parents = set([2])
n2 = Node((1, 2), children=set([1]))
n2.parents = set([2])
self.assertTrue(n1 == n2)
self.assertFalse(n1 != n2)
n2.location = (3, 4)
self.assertFalse(n1 == n2)
self.assertTrue(n1 != n2)
n2.location = (1, 2)
n2.parents = set([3])
self.assertFalse(n1 == n2)
self.assertTrue(n1 != n2)
n2.children = set([3])
self.assertFalse(n1 == n2)
self.assertTrue(n1 != n2)
n2.parents = set([2])
self.assertFalse(n1 == n2)
self.assertTrue(n1 != n2)
def test_repr(self):
self.assertEquals(
str(Node((1, 2), children=set([1, 2, 3]))),
"<Node 1,2 children={1, 2, 3} parents={}>"
)
def test_remove_should_acquire_lock_on_parent_nodes(self):
parent1 = Node((1, 2))
parent2 = Node((2, 3))
node = Node((3, 4), parents=set([(1, 2), (2, 3)]))
parent1.children = set([(3, 4)])
parent2.children = set([(3, 4)])
leaf_queue = Mock()
parent1.lock = Mock()
        parent1.lock.acquire.side_effect = lambda: self.assertTrue(node.location in parent1.children)
        parent1.lock.release.side_effect = lambda: self.assertFalse(node.location in parent1.children)
        parent2.lock = Mock()
        parent2.lock.acquire.side_effect = lambda: self.assertTrue(node.location in parent2.children)
        parent2.lock.release.side_effect = lambda: self.assertFalse(node.location in parent2.children)
node.remove_from_parents([parent1, parent2], leaf_queue)
self.assertTrue(parent1.lock.acquire.called)
self.assertTrue(parent1.lock.release.called)
self.assertTrue(parent2.lock.acquire.called)
self.assertTrue(parent2.lock.release.called)
self.assertEquals(
leaf_queue.put.call_args_list,
[call((1, 2)), call((2, 3))]
)
def test_remove_should_add_new_leaves_to_queue(self):
parent = Node((1, 2))
child1 = Node((2, 3), parents=set([parent.location]))
child2 = Node((3, 4), parents=set([parent.location]))
parent.children = set([child1.location, child2.location])
leaf_queue = Mock()
child1.remove_from_parents([parent], leaf_queue)
self.assertFalse(leaf_queue.put.called)
child2.remove_from_parents([parent], leaf_queue)
self.assertEquals(leaf_queue.put.call_args, ((parent.location,), {}))
class TestAddLocationDependencies(ResolverTestCase):
def test_add_location_dependencies_does(self):
graph = {}
dependencies = set([sentinel.dependencies])
_add_location_dependencies(graph, sentinel.location, dependencies)
self.assertEquals(type(graph[sentinel.location]), Node)
self.assertEquals(graph[sentinel.location].children, dependencies)
def test_add_location_dependencies_also_adds_reverse_dependencies(self):
graph = {}
parent_loc = (1, 2)
child1_loc = (2, 3)
child2_loc = (3, 4)
grandchild_loc = (4, 5)
_add_location_dependencies(graph, parent_loc, set([child1_loc, child2_loc]))
expected = {
parent_loc: Node(parent_loc, children=set([child1_loc, child2_loc])),
child1_loc: Node(child1_loc, parents=set([parent_loc])),
child2_loc: Node(child2_loc, parents=set([parent_loc])),
}
self.assertEquals(expected, graph)
_add_location_dependencies(graph, grandchild_loc, set())
expected = {
parent_loc: Node(parent_loc, children=set([child1_loc, child2_loc])),
child1_loc: Node(child1_loc, parents=set([parent_loc])),
child2_loc: Node(child2_loc, parents=set([parent_loc])),
grandchild_loc: Node(grandchild_loc),
}
self.assertEquals(expected, graph)
_add_location_dependencies(graph, child1_loc, set([grandchild_loc]))
expected = {
parent_loc: Node(parent_loc, children=set([child1_loc, child2_loc])),
child1_loc: Node(
child1_loc,
children=set([grandchild_loc]),
parents=set([parent_loc])
),
child2_loc: Node(child2_loc, parents=set([parent_loc])),
grandchild_loc: Node(grandchild_loc, parents=set([child1_loc])),
}
self.assertEquals(expected, graph)
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os
import sys
from atomic_reactor.util import ImageName
from atomic_reactor.inner import PushConf
try:
if sys.version_info.major > 2:
# importing dockpulp in Python 3 causes SyntaxError
raise ImportError
import dockpulp
except ImportError:
import inspect
# Find our dockpulp stub
import tests.mock.dockpulp as dockpulp
mock_dockpulp_path = os.path.dirname(inspect.getfile(dockpulp.Pulp))
if mock_dockpulp_path not in sys.path:
sys.path.insert(0, os.path.dirname(mock_dockpulp_path))
# Now load it properly, the same way the plugin will
del dockpulp
import dockpulp
from atomic_reactor.plugins.post_pulp_sync import PulpSyncPlugin
from atomic_reactor.constants import PLUGIN_PULP_PUSH_KEY
from flexmock import flexmock
import json
import pytest
class MockPulp(object):
"""
Mock dockpulp.Pulp object
"""
registry = 'pulp.example.com'
def login(self, username, password):
pass
def set_certs(self, cer, key):
pass
def syncRepo(self, env=None, repo=None, config_file=None, prefix_with=None,
feed=None, basic_auth_username=None, basic_auth_password=None,
ssl_validation=None):
pass
def getRepos(self, rids, fields=None):
pass
def getPrefix(self):
return 'redhat-'
def createRepo(self, repo_id, url, registry_id=None, desc=None,
title=None, protected=False, distributors=True,
prefix_with='redhat-', productline=None):
pass
def crane(self, repos, wait=True):
pass
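# The tests below partially mock this instance with flexmock: dockpulp.Pulp is
# stubbed to return `mockpulp`, and the .once()/.ordered() expectations verify
# the call sequence the plugin makes against it.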
class TestPostPulpSync(object):
@staticmethod
def workflow(docker_repos, registry=None):
images = []
for tag in ['1.0-1', '1.0', 'latest', 'unique-timestamp']:
images.extend([ImageName.parse(
'{0}/{1}:{2}'.format(registry, repo, tag).lstrip('/')
) for repo in docker_repos])
tag_conf = flexmock(images=images)
push_conf = PushConf()
return flexmock(tag_conf=tag_conf,
push_conf=push_conf,
postbuild_plugins_conf=[])
@pytest.mark.parametrize('get_prefix', [True, False])
@pytest.mark.parametrize(('pulp_repo_prefix', 'expected_prefix'), [
(None, 'redhat-'),
('prefix-', 'prefix-')
])
def test_pulp_repo_prefix(self,
get_prefix,
pulp_repo_prefix,
expected_prefix):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = '{}prod-myrepository'.format(expected_prefix)
env = 'pulp'
kwargs = {}
if pulp_repo_prefix:
kwargs['pulp_repo_prefix'] = pulp_repo_prefix
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry,
**kwargs)
mockpulp = MockPulp()
if get_prefix:
(flexmock(mockpulp)
.should_receive('getPrefix')
.with_args()
.and_return(expected_prefix))
else:
(flexmock(mockpulp)
.should_receive('getPrefix')
.with_args()
.and_raise(AttributeError))
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('crane')
.with_args([prefixed_pulp_repoid], wait=True)
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
plugin.run()
def test_auth_none(self):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry)
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('login')
.never())
(flexmock(mockpulp)
.should_receive('set_certs')
.never())
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('crane')
.with_args([prefixed_pulp_repoid], wait=True)
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
plugin.run()
@pytest.mark.parametrize('cer_exists', [True, False])
@pytest.mark.parametrize('key_exists', [True, False])
def test_pulp_auth(self, tmpdir, cer_exists, key_exists):
pulp_secret_path = str(tmpdir)
cer = pulp_secret_path + '/pulp.cer'
key = pulp_secret_path + '/pulp.key'
if cer_exists:
open(cer, 'w').close()
if key_exists:
open(key, 'w').close()
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry,
pulp_secret_path=pulp_secret_path)
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('login')
.never())
if cer_exists and key_exists:
(flexmock(mockpulp)
.should_receive('set_certs')
.with_args(cer, key)
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('crane')
.with_args([prefixed_pulp_repoid], wait=True)
.once()
.ordered())
else:
(flexmock(mockpulp)
.should_receive('set_certs')
.never())
(flexmock(mockpulp)
.should_receive('syncRepo')
.never())
(flexmock(mockpulp)
.should_receive('crane')
.never())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
if cer_exists and key_exists:
plugin.run()
else:
with pytest.raises(RuntimeError):
plugin.run()
@pytest.mark.parametrize('content', [
None,
'{"invalid-json',
])
def test_dockercfg_missing_or_invalid(self, tmpdir, content):
env = 'pulp'
if content is not None:
registry_secret = os.path.join(str(tmpdir), '.dockercfg')
with open(registry_secret, 'w') as fp:
fp.write(content)
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow(['repo']),
pulp_registry_name=env,
docker_registry='http://registry.example.com',
registry_secret_path=str(tmpdir))
mockpulp = MockPulp()
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
with pytest.raises(RuntimeError):
plugin.run()
def test_dockercfg_registry_not_present(self, tmpdir):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
registry_secret = os.path.join(str(tmpdir), '.dockercfg')
dockercfg = {
'other-registry.example.com': {
'username': 'user',
'password': 'pass',
'email': 'user@example.com',
},
}
with open(registry_secret, 'w') as fp:
json.dump(dockercfg, fp)
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry,
registry_secret_path=str(tmpdir))
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
plugin.run()
@pytest.mark.parametrize('scheme', ['http', 'https'])
def test_dockercfg(self, tmpdir, scheme):
docker_registry = '{}://registry.example.com'.format(scheme)
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
user = 'user'
pw = 'pass'
env = 'pulp'
registry_secret = os.path.join(str(tmpdir), '.dockercfg')
dockercfg = {
'registry.example.com': {
'username': user,
'password': pw,
'email': 'user@example.com',
},
}
with open(registry_secret, 'w') as fp:
json.dump(dockercfg, fp)
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry,
registry_secret_path=str(tmpdir))
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry,
basic_auth_username=user,
basic_auth_password=pw)
.and_return(([], []))
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
plugin.run()
@pytest.mark.parametrize(('insecure_registry', 'ssl_validation'), [
(None, None),
(True, False),
(False, True),
])
def test_insecure_registry(self, insecure_registry, ssl_validation):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry,
insecure_registry=insecure_registry)
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
sync_exp = flexmock(mockpulp).should_receive('syncRepo')
if ssl_validation is None:
sync_exp = sync_exp.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
else:
sync_exp = sync_exp.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry,
ssl_validation=ssl_validation)
(sync_exp
.and_return(([], []))
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
plugin.run()
@pytest.mark.parametrize('fail', [False, True])
def test_dockpulp_loglevel(self, fail, caplog):
loglevel = 3
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args(['redhat-prod-myrepository'], fields=['id'])
.and_return([{'id': 'redhat-prod-myrepository'}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.and_return(([], [])))
flexmock(dockpulp).should_receive('Pulp').and_return(mockpulp)
logger = flexmock()
expectation = (logger
.should_receive('setLevel')
.with_args(loglevel)
.once())
if fail:
expectation.and_raise(ValueError)
(flexmock(dockpulp)
.should_receive('setup_logger')
.and_return(logger)
.once())
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow(['prod/myrepository']),
pulp_registry_name='pulp',
docker_registry='http://registry.example.com',
dockpulp_loglevel=loglevel)
plugin.run()
errors = [record.getMessage() for record in caplog.records()
if record.levelname == 'ERROR']
if fail:
assert len(errors) >= 1
else:
assert not errors
@pytest.mark.parametrize('already_exists', [False, True])
def test_store_registry(self, already_exists):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
workflow = self.workflow([docker_repository])
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('login')
.never())
(flexmock(mockpulp)
.should_receive('set_certs')
.never())
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('crane')
.with_args([prefixed_pulp_repoid], wait=True)
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
if already_exists:
workflow.push_conf.add_pulp_registry(env, mockpulp.registry,
server_side_sync=False)
plugin = PulpSyncPlugin(tasker=None,
workflow=workflow,
pulp_registry_name=env,
docker_registry=docker_registry)
num_registries = len(workflow.push_conf.pulp_registries)
assert num_registries == (1 if already_exists else 0)
plugin.run()
assert len(workflow.push_conf.pulp_registries) == 1
def test_delete_not_implemented(self, caplog):
"""
Should log an error (but not raise an exception) when
delete_from_registry is True.
"""
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args(['redhat-prod-myrepository'], fields=['id'])
.and_return([{'id': 'redhat-prod-myrepository'}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.and_return(([], [])))
flexmock(dockpulp).should_receive('Pulp').and_return(mockpulp)
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow(['prod/myrepository']),
pulp_registry_name='pulp',
docker_registry='http://registry.example.com',
delete_from_registry=True)
plugin.run()
errors = [record.getMessage() for record in caplog.records()
if record.levelname == 'ERROR']
assert [message for message in errors
if 'not implemented' in message]
def test_create_missing_repo(self):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
plugin = PulpSyncPlugin(tasker=None,
workflow=self.workflow([docker_repository]),
pulp_registry_name=env,
docker_registry=docker_registry)
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('createRepo')
.with_args(prefixed_pulp_repoid, None,
registry_id=docker_repository,
prefix_with='redhat-')
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('crane')
.with_args([prefixed_pulp_repoid], wait=True)
.once()
.ordered())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
plugin.run()
@pytest.mark.parametrize('publish,has_pulp_push,should_publish', [
(None, False, True),
(None, True, False),
(True, False, True),
(True, True, False),
(False, False, False),
(False, True, False),
])
def test_publish(self, publish, has_pulp_push, should_publish, caplog):
docker_registry = 'http://registry.example.com'
docker_repository = 'prod/myrepository'
prefixed_pulp_repoid = 'redhat-prod-myrepository'
env = 'pulp'
mockpulp = MockPulp()
(flexmock(mockpulp)
.should_receive('login')
.never())
(flexmock(mockpulp)
.should_receive('set_certs')
.never())
(flexmock(mockpulp)
.should_receive('getRepos')
.with_args([prefixed_pulp_repoid], fields=['id'])
.and_return([{'id': prefixed_pulp_repoid}])
.once()
.ordered())
(flexmock(mockpulp)
.should_receive('syncRepo')
.with_args(repo=prefixed_pulp_repoid,
feed=docker_registry)
.and_return(([], []))
.once()
.ordered())
if should_publish:
(flexmock(mockpulp)
.should_receive('crane')
.with_args([prefixed_pulp_repoid], wait=True)
.once()
.ordered())
else:
(flexmock(mockpulp)
.should_receive('crane')
.never())
(flexmock(dockpulp)
.should_receive('Pulp')
.with_args(env=env)
.and_return(mockpulp))
workflow = self.workflow([docker_repository], mockpulp.registry)
workflow.postbuild_plugins_conf.append(
{
'name': PulpSyncPlugin.key,
},
)
if has_pulp_push:
workflow.postbuild_plugins_conf.append(
{
'name': PLUGIN_PULP_PUSH_KEY,
},
)
kwargs = {
'pulp_registry_name': env,
'docker_registry': docker_registry,
}
if publish is not None:
kwargs['publish'] = publish
plugin = PulpSyncPlugin(tasker=None,
workflow=workflow,
**kwargs)
plugin.run()
log_messages = [l.getMessage() for l in caplog.records()]
for image in workflow.tag_conf.images:
expected_log = 'image available at %s' % image.to_str()
if should_publish:
assert expected_log in log_messages
else:
assert expected_log not in log_messages
|
|
# -*- coding: utf-8 -*-
import pymongo
from modularodm import fields
from framework.auth.decorators import Auth
from website.models import NodeLog
from website.addons.base import GuidFile
from website.addons.base import exceptions
from website.addons.base import AddonNodeSettingsBase, AddonUserSettingsBase
from website.addons.base import StorageAddonBase
from . import messages
from .api import Figshare
from . import exceptions as fig_exceptions
from . import settings as figshare_settings
class FigShareGuidFile(GuidFile):
__indices__ = [
{
'key_or_list': [
('node', pymongo.ASCENDING),
('article_id', pymongo.ASCENDING),
('file_id', pymongo.ASCENDING),
],
'unique': True,
}
]
article_id = fields.StringField(index=True)
file_id = fields.StringField(index=True)
@property
def waterbutler_path(self):
if getattr(self.node.get_addon('figshare'), 'figshare_type', None) == 'project':
return '/{}/{}'.format(self.article_id, self.file_id)
return '/' + str(self.file_id)
@property
def provider(self):
return 'figshare'
@property
def external_url(self):
extra = self._metadata_cache['extra']
if extra['status'] == 'public':
return self._metadata_cache['extra']['webView']
return None
def _exception_from_response(self, response):
try:
if response.json()['data']['extra']['status'] == 'drafts':
self._metadata_cache = response.json()['data']
raise fig_exceptions.FigshareIsDraftError(self)
except KeyError:
pass
super(FigShareGuidFile, self)._exception_from_response(response)
@property
def version_identifier(self):
return ''
@property
def unique_identifier(self):
return '{}{}'.format(self.article_id, self.file_id)
class AddonFigShareUserSettings(AddonUserSettingsBase):
oauth_request_token = fields.StringField()
oauth_request_token_secret = fields.StringField()
oauth_access_token = fields.StringField()
oauth_access_token_secret = fields.StringField()
@property
def has_auth(self):
return self.oauth_access_token is not None
def to_json(self, user):
ret = super(AddonFigShareUserSettings, self).to_json(user)
ret.update({
'authorized': self.has_auth,
'name': self.owner.display_full_name(),
'profile_url': self.owner.profile_url,
})
return ret
def remove_auth(self, save=False):
self.oauth_access_token = None
self.oauth_access_token_secret = None
for node_settings in self.addonfigsharenodesettings__authorized:
node_settings.deauthorize(auth=Auth(user=self.owner), save=True)
if save:
self.save()
def delete(self, save=False):
self.remove_auth(save=False)
super(AddonFigShareUserSettings, self).delete(save=save)
class AddonFigShareNodeSettings(StorageAddonBase, AddonNodeSettingsBase):
figshare_id = fields.StringField()
figshare_type = fields.StringField()
figshare_title = fields.StringField()
user_settings = fields.ForeignField(
'addonfigshareusersettings', backref='authorized'
)
@property
def folder_name(self):
return self.figshare_title
def archive_errors(self):
api = Figshare.from_settings(self.user_settings)
items = []
if self.figshare_type in ('article', 'fileset'):
items = api.article(self, self.figshare_id)['items']
else:
items = api.project(self, self.figshare_id)['articles']
private = any(
[item for item in items if item['status'] != 'Public']
)
if private:
return 'The figshare {figshare_type} <strong>{figshare_title}</strong> contains private content that we cannot copy to the registration. If this content is made public on figshare we should then be able to copy those files. You can view those files <a href="{url}" target="_blank">here.</a>'.format(
figshare_type=self.figshare_type,
figshare_title=self.figshare_title,
url=self.owner.web_url_for('collect_file_trees'))
def find_or_create_file_guid(self, path):
# path should be /aid/fid
# split return ['', aid, fid] or ['', fid]
split_path = path.split('/')
if len(split_path) == 3:
_, article_id, file_id = split_path
else:
_, file_id = split_path
article_id = self.figshare_id
return FigShareGuidFile.get_or_create(
node=self.owner,
file_id=file_id,
article_id=article_id,
)
@property
def api_url(self):
if self.user_settings is None:
return figshare_settings.API_URL
else:
return figshare_settings.API_OAUTH_URL
@property
def has_auth(self):
return bool(self.user_settings and self.user_settings.has_auth)
@property
def complete(self):
return self.has_auth and self.figshare_id is not None
@property
def linked_content(self):
return {
'id': self.figshare_id,
'type': self.figshare_type,
'name': self.figshare_title,
}
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
node = self.owner
node.add_log(
action='figshare_node_authorized',
params={
'project': node.parent_id,
'node': node._id,
},
auth=Auth(user=user_settings.owner),
)
if save:
self.save()
def deauthorize(self, auth=None, add_log=True, save=False):
"""Remove user authorization from this node and log the event."""
self.user_settings = None
self.figshare_id = None
self.figshare_type = None
self.figshare_title = None
if add_log:
node = self.owner
self.owner.add_log(
action='figshare_node_deauthorized',
params={
'project': node.parent_id,
'node': node._id,
},
auth=auth,
)
if save:
self.save()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Cannot serialize credentials for unauthorized addon')
return {
'client_token': figshare_settings.CLIENT_ID,
'client_secret': figshare_settings.CLIENT_SECRET,
'owner_token': self.user_settings.oauth_access_token,
'owner_secret': self.user_settings.oauth_access_token_secret,
}
def serialize_waterbutler_settings(self):
if not self.figshare_type or not self.figshare_id:
raise exceptions.AddonError('Cannot serialize settings for unconfigured addon')
return {
'container_type': self.figshare_type,
'container_id': str(self.figshare_id),
}
def create_waterbutler_log(self, auth, action, metadata):
if action in [NodeLog.FILE_ADDED, NodeLog.FILE_UPDATED]:
name = metadata['name']
url = self.owner.web_url_for('addon_view_or_download_file', provider='figshare', path=metadata['path'])
urls = {
'view': url,
'download': url + '?action=download'
}
elif action == NodeLog.FILE_REMOVED:
name = metadata['path']
urls = {}
self.owner.add_log(
'figshare_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': name,
'urls': urls,
'figshare': {
'id': self.figshare_id,
'type': self.figshare_type,
},
},
)
def delete(self, save=False):
super(AddonFigShareNodeSettings, self).delete(save=False)
self.deauthorize(add_log=False, save=save)
def update_fields(self, fields, node, auth):
updated = False
if fields.get('id'):
updated = updated or (fields['id'] != self.figshare_id)
self.figshare_id = fields['id']
if fields.get('name'):
updated = updated or (fields['name'] != self.figshare_title)
self.figshare_title = fields['name']
if fields.get('type'):
updated = updated or (fields['type'] != self.figshare_type)
self.figshare_type = fields['type']
self.save()
if updated:
node.add_log(
action='figshare_content_linked',
params={
'project': node.parent_id,
'node': node._id,
'figshare': {
'type': self.figshare_type,
'id': self.figshare_id,
'title': self.figshare_title,
},
},
auth=auth,
)
def to_json(self, user):
ret = super(AddonFigShareNodeSettings, self).to_json(user)
figshare_user = user.get_addon('figshare')
ret.update({
'figshare_id': self.figshare_id or '',
'figshare_type': self.figshare_type or '',
'figshare_title': self.figshare_title or '',
'node_has_auth': self.has_auth,
'user_has_auth': bool(figshare_user) and figshare_user.has_auth,
'figshare_options': [],
'is_registration': self.owner.is_registration,
})
if self.has_auth:
ret.update({
'authorized_user': self.user_settings.owner.fullname,
'owner_url': self.user_settings.owner.url,
'is_owner': user == self.user_settings.owner
})
return ret
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
if not self.figshare_id:
return []
figshare = node.get_addon('figshare')
# Quit if no user authorization
node_permissions = 'public' if node.is_public else 'private'
if figshare.figshare_type == 'project':
if node_permissions == 'private':
message = messages.BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.figshare_id)
return [message]
            else:
                message = messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS.format(category=node.project_or_component, project_id=figshare.figshare_id)
                return [message]
connect = Figshare.from_settings(self.user_settings)
article_is_public = connect.article_is_public(self.figshare_id)
article_permissions = 'public' if article_is_public else 'private'
if article_permissions != node_permissions:
message = messages.BEFORE_PAGE_LOAD_PERM_MISMATCH.format(
category=node.project_or_component,
node_perm=node_permissions,
figshare_perm=article_permissions,
figshare_id=self.figshare_id,
)
if article_permissions == 'private' and node_permissions == 'public':
message += messages.BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS
return [message]
def before_remove_contributor(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
return messages.BEFORE_REMOVE_CONTRIBUTOR.format(
category=node.project_or_component,
user=removed.fullname,
)
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the FigShare add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=node.category_display,
title=node.title,
user=removed.fullname
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <a href="{url}">Settings</a> page.'
).format(url=url)
return message
def after_fork(self, node, fork, user, save=True):
"""
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone, _ = super(AddonFigShareNodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
message = messages.AFTER_FORK_OWNER.format(
category=fork.project_or_component,
)
else:
message = messages.AFTER_FORK_NOT_OWNER.format(
category=fork.project_or_component,
url=fork.url + 'settings/'
)
            # return fresh, unauthorized settings for the fork rather than the clone
            return AddonFigShareNodeSettings(), message
if save:
clone.save()
return clone, message
def before_make_public(self, node):
return (
'This {cat} is connected to a figshare project. Files marked as '
'private on figshare <strong>will be visible to the public'
'</strong>.'
).format(
cat=node.project_or_component,
)
def after_delete(self, node, user):
self.deauthorize(Auth(user=user), add_log=True, save=True)
def before_register(self, node, user):
if self.has_auth and self.figshare_id:
return messages.BEFORE_REGISTER.format(
category=node.project_or_component,
)
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Classes and functions for layer 2 protocols.
"""
import os,struct,time
from scapy.base_classes import Net
from scapy.config import conf
from scapy.packet import *
from scapy.ansmachine import *
from scapy.plist import SndRcvList
from scapy.fields import *
from scapy.sendrecv import srp, srp1, srpflood, sendp, sniff
from scapy.arch import get_if_hwaddr
#################
## Tools ##
#################
class Neighbor:
def __init__(self):
self.resolvers = {}
def register_l3(self, l2, l3, resolve_method):
self.resolvers[l2,l3]=resolve_method
def resolve(self, l2inst, l3inst):
k = l2inst.__class__,l3inst.__class__
if k in self.resolvers:
return self.resolvers[k](l2inst,l3inst)
def __repr__(self):
return "\n".join("%-15s -> %-15s" % (l2.__name__, l3.__name__) for l2,l3 in self.resolvers)
conf.neighbor = Neighbor()
conf.netcache.new_cache("arp_cache", 120) # cache entries expire after 120s
@conf.commands.register
def getmacbyip(ip, chainCC=0):
"""Return MAC address corresponding to a given IP address"""
if isinstance(ip,Net):
ip = next(iter(ip))
ip = inet_ntoa(inet_aton(ip))
tmp = inet_aton(ip)
if (tmp[0] & 0xf0) == 0xe0: # mcast @
return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1]&0x7f,tmp[2],tmp[3])
iff,a,gw = conf.route.route(ip)
if ( (iff == "lo") or (ip == conf.route.get_if_bcast(iff)) ):
return "ff:ff:ff:ff:ff:ff"
if gw != "0.0.0.0":
ip = gw
mac = conf.netcache.arp_cache.get(ip)
if mac:
return mac
res = srp1(Ether(dst=ETHER_BROADCAST)/ARP(op="who-has", pdst=ip),
type=ETH_P_ARP,
iface = iff,
timeout=2,
verbose=0,
chainCC=chainCC,
nofilter=1)
if res is not None:
mac = res.payload.hwsrc
conf.netcache.arp_cache[ip] = mac
return mac
return None
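# Illustrative usage (added comment, not in the original source; assumes a
# configured route and a host that answers ARP):
#     getmacbyip("192.168.0.1")    # broadcasts a who-has and caches the reply
#     getmacbyip("224.0.0.251")    # multicast MACs are derived arithmetically, no packet sent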
### Fields
class DestMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
class SourceMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
class ARPSourceMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
### Layers
ETHER_TYPES['802_AD'] = 0x88a8
class Ether(Packet):
name = "Ethernet"
fields_desc = [ MACField("dst","00:00:00:01:00:00"),
MACField("src","00:00:00:02:00:00"),
XShortEnumField("type", 0x9000, ETHER_TYPES) ]
def hashret(self):
return struct.pack("H",self.type)+self.payload.hashret()
def answers(self, other):
if isinstance(other,Ether):
if self.type == other.type:
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("%src% > %dst% (%type%)")
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
return Dot3
return cls
class Dot3(Packet):
name = "802.3"
fields_desc = [ DestMACField("dst"),
MACField("src", ETHER_ANY),
LenField("len", None, "H") ]
def extract_padding(self,s):
l = self.len
return s[:l],s[l:]
def answers(self, other):
if isinstance(other,Dot3):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return "802.3 %s > %s" % (self.src, self.dst)
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] > 1500:
return Ether
return cls
class LLC(Packet):
name = "LLC"
fields_desc = [ XByteField("dsap", 0x00),
XByteField("ssap", 0x00),
ByteField("ctrl", 0) ]
conf.neighbor.register_l3(Ether, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
conf.neighbor.register_l3(Dot3, LLC, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class CookedLinux(Packet):
name = "cooked linux"
fields_desc = [ ShortEnumField("pkttype",0, {0: "unicast",
4:"sent-by-us"}), #XXX incomplete
XShortField("lladdrtype",512),
ShortField("lladdrlen",0),
StrFixedLenField("src","",8),
XShortEnumField("proto",0x800,ETHER_TYPES) ]
class SNAP(Packet):
name = "SNAP"
fields_desc = [ X3BytesField("OUI",0x000000),
XShortEnumField("code", 0x000, ETHER_TYPES) ]
conf.neighbor.register_l3(Dot3, SNAP, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class Dot1Q(Packet):
name = "802.1Q"
aliastypes = [ Ether ]
fields_desc = [ BitField("prio", 0, 3),
BitField("id", 0, 1),
BitField("vlan", 1, 12),
XShortEnumField("type", 0x0000, ETHER_TYPES) ]
def answers(self, other):
if isinstance(other,Dot1Q):
if ( (self.type == other.type) and
(self.vlan == other.vlan) ):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
return 0
def default_payload_class(self, pay):
if self.type <= 1500:
return LLC
return conf.raw_layer
def extract_padding(self,s):
if self.type <= 1500:
return s[:self.type],s[self.type:]
return s,None
def mysummary(self):
if isinstance(self.underlayer, Ether):
return self.underlayer.sprintf("802.1q %Ether.src% > %Ether.dst% (%Dot1Q.type%) vlan %Dot1Q.vlan%")
else:
return self.sprintf("802.1q (%Dot1Q.type%) vlan %Dot1Q.vlan%")
conf.neighbor.register_l3(Ether, Dot1Q, lambda l2,l3: conf.neighbor.resolve(l2,l3.payload))
class STP(Packet):
name = "Spanning Tree Protocol"
fields_desc = [ ShortField("proto", 0),
ByteField("version", 0),
ByteField("bpdutype", 0),
ByteField("bpduflags", 0),
ShortField("rootid", 0),
MACField("rootmac", ETHER_ANY),
IntField("pathcost", 0),
ShortField("bridgeid", 0),
MACField("bridgemac", ETHER_ANY),
ShortField("portid", 0),
BCDFloatField("age", 1),
BCDFloatField("maxage", 20),
BCDFloatField("hellotime", 2),
BCDFloatField("fwddelay", 15) ]
class EAPOL(Packet):
name = "EAPOL"
fields_desc = [ ByteField("version", 1),
ByteEnumField("type", 0, ["EAP_PACKET", "START", "LOGOFF", "KEY", "ASF"]),
LenField("len", None, "H") ]
EAP_PACKET= 0
START = 1
LOGOFF = 2
KEY = 3
ASF = 4
def extract_padding(self, s):
l = self.len
return s[:l],s[l:]
def hashret(self):
#return chr(self.type)+self.payload.hashret()
return bytes([self.type])+self.payload.hashret()
def answers(self, other):
if isinstance(other,EAPOL):
if ( (self.type == self.EAP_PACKET) and
(other.type == self.EAP_PACKET) ):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("EAPOL %EAPOL.type%")
class EAP(Packet):
name = "EAP"
fields_desc = [ ByteEnumField("code", 4, {1:"REQUEST",2:"RESPONSE",3:"SUCCESS",4:"FAILURE"}),
ByteField("id", 0),
ShortField("len",None),
ConditionalField(ByteEnumField("type",0, {1:"ID",4:"MD5"}), lambda pkt:pkt.code not in [EAP.SUCCESS, EAP.FAILURE])
]
REQUEST = 1
RESPONSE = 2
SUCCESS = 3
FAILURE = 4
TYPE_ID = 1
TYPE_MD5 = 4
def answers(self, other):
if isinstance(other,EAP):
if self.code == self.REQUEST:
return 0
elif self.code == self.RESPONSE:
if ( (other.code == self.REQUEST) and
(other.type == self.type) ):
return 1
elif other.code == self.RESPONSE:
return 1
return 0
def post_build(self, p, pay):
if self.len is None:
l = len(p)+len(pay)
p = p[:2]+bytes([((l>>8)&0xff),(l&0xff)])+p[4:]
return p+pay
class ARP(Packet):
name = "ARP"
fields_desc = [ XShortField("hwtype", 0x0001),
XShortEnumField("ptype", 0x0800, ETHER_TYPES),
ByteField("hwlen", 6),
ByteField("plen", 4),
ShortEnumField("op", 1, {"who-has":1, "is-at":2, "RARP-req":3, "RARP-rep":4, "Dyn-RARP-req":5, "Dyn-RAR-rep":6, "Dyn-RARP-err":7, "InARP-req":8, "InARP-rep":9}),
ARPSourceMACField("hwsrc"),
SourceIPField("psrc","pdst"),
MACField("hwdst", ETHER_ANY),
IPField("pdst", "0.0.0.0") ]
who_has = 1
is_at = 2
def answers(self, other):
if isinstance(other,ARP):
if ( (self.op == self.is_at) and
(other.op == self.who_has) and
(self.psrc == other.pdst) ):
return 1
return 0
def route(self):
dst = self.pdst
if isinstance(dst,Gen):
dst = next(iter(dst))
return conf.route.route(dst)
def extract_padding(self, s):
return b"",s
def mysummary(self):
if self.op == self.is_at:
return self.sprintf("ARP is at %hwsrc% says %psrc%")
elif self.op == self.who_has:
return self.sprintf("ARP who has %pdst% says %psrc%")
else:
return self.sprintf("ARP %op% %psrc% > %pdst%")
conf.neighbor.register_l3(Ether, ARP, lambda l2,l3: getmacbyip(l3.pdst))
class GRErouting(Packet):
name = "GRE routing informations"
fields_desc = [ ShortField("address_family",0),
ByteField("SRE_offset", 0),
FieldLenField("SRE_len", None, "routing_info", "B"),
StrLenField("routing_info", "", "SRE_len"),
]
class GRE(Packet):
name = "GRE"
fields_desc = [ BitField("chksum_present",0,1),
BitField("routing_present",0,1),
BitField("key_present",0,1),
BitField("seqnum_present",0,1),
BitField("strict_route_source",0,1),
BitField("recursion_control",0,3),
BitField("flags",0,5),
BitField("version",0,3),
XShortEnumField("proto", 0x0000, ETHER_TYPES),
ConditionalField(XShortField("chksum",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
ConditionalField(XShortField("offset",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
ConditionalField(XIntField("key",None), lambda pkt:pkt.key_present==1),
ConditionalField(XIntField("seqence_number",None), lambda pkt:pkt.seqnum_present==1),
]
def post_build(self, p, pay):
p += pay
if self.chksum_present and self.chksum is None:
c = checksum(p)
p = p[:4]+bytes([((c>>8)&0xff),(c&0xff)])+p[6:]
return p
class Dot1AD(Dot1Q):
name = '802_1AD'
bind_layers( Dot3, LLC, )
bind_layers( Ether, LLC, type=122)
bind_layers( Ether, Dot1Q, type=33024)
bind_layers( Ether, Dot1AD, type=0x88a8)
bind_layers( Dot1AD, Dot1AD, type=0x88a8)
bind_layers( Dot1AD, Dot1Q, type=0x8100)
bind_layers( Dot1Q, Dot1AD, type=0x88a8)
bind_layers( Ether, Ether, type=1)
bind_layers( Ether, ARP, type=2054)
bind_layers( Ether, EAPOL, type=34958)
bind_layers( Ether, EAPOL, dst='01:80:c2:00:00:03', type=34958)
bind_layers( CookedLinux, LLC, proto=122)
bind_layers( CookedLinux, Dot1Q, proto=33024)
bind_layers( CookedLinux, Ether, proto=1)
bind_layers( CookedLinux, ARP, proto=2054)
bind_layers( CookedLinux, EAPOL, proto=34958)
bind_layers( GRE, LLC, proto=122)
bind_layers( GRE, Dot1Q, proto=33024)
bind_layers( GRE, Ether, proto=1)
bind_layers( GRE, ARP, proto=2054)
bind_layers( GRE, EAPOL, proto=34958)
bind_layers( GRE, GRErouting, { "routing_present" : 1 } )
bind_layers( GRErouting, conf.raw_layer,{ "address_family" : 0, "SRE_len" : 0 })
bind_layers( GRErouting, GRErouting, { } )
bind_layers( EAPOL, EAP, type=0)
bind_layers( LLC, STP, dsap=66, ssap=66, ctrl=3)
bind_layers( LLC, SNAP, dsap=170, ssap=170, ctrl=3)
bind_layers( SNAP, Dot1Q, code=33024)
bind_layers( SNAP, Ether, code=1)
bind_layers( SNAP, ARP, code=2054)
bind_layers( SNAP, EAPOL, code=34958)
bind_layers( SNAP, STP, code=267)
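# Added note (not in the original source): with the bindings above, a frame such
# as Ether()/Dot1Q(vlan=7)/ARP(pdst="10.0.0.1") round-trips through dissection,
# since Ethernet type 0x8100 (33024) maps to Dot1Q and 0x0806 (2054) maps to ARP.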
conf.l2types.register(ARPHDR_ETHER, Ether)
conf.l2types.register_num2layer(ARPHDR_METRICOM, Ether)
conf.l2types.register_num2layer(ARPHDR_LOOPBACK, Ether)
conf.l2types.register_layer2num(ARPHDR_ETHER, Dot3)
conf.l2types.register(144, CookedLinux) # called LINUX_IRDA, similar to CookedLinux
conf.l2types.register(113, CookedLinux)
conf.l3types.register(ETH_P_ARP, ARP)
### Technics
@conf.commands.register
def arpcachepoison(target, victim, interval=60):
"""Poison target's cache with (your MAC,victim's IP) couple
arpcachepoison(target, victim, [interval=60]) -> None
"""
tmac = getmacbyip(target)
p = Ether(dst=tmac)/ARP(op="who-has", psrc=victim, pdst=target)
try:
while 1:
sendp(p, iface_hint=target)
if conf.verb > 1:
os.write(1,b".")
time.sleep(interval)
except KeyboardInterrupt:
pass
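# Hedged example (added comment, not in the original source; requires privileges
# to send raw frames and a network where both hosts are reachable):
#     arpcachepoison("192.168.0.10", "192.168.0.1", interval=10)
# repeatedly sends the target a who-has pairing the victim's IP with our MAC,
# every 10 seconds, until interrupted.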
class ARPingResult(SndRcvList):
def __init__(self, res=None, name="ARPing", stats=None):
SndRcvList.__init__(self, res, name, stats)
def show(self):
for s,r in self.res:
print(r.sprintf("%19s,Ether.src% %ARP.psrc%"))
@conf.commands.register
def arping(net, timeout=2, cache=0, verbose=None, **kargs):
"""Send ARP who-has requests to determine which hosts are up
arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
Set cache=True if you want arping to modify internal ARP-Cache"""
if verbose is None:
verbose = conf.verb
ans,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=net), verbose=verbose,
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res)
if cache and ans is not None:
for pair in ans:
conf.netcache.arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time())
if verbose:
ans.show()
return ans,unans
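# Illustrative usage (added comment, not in the original source):
#     ans, unans = arping("192.168.0.0/24", timeout=1)
# prints one "MAC  IP" line per responder; passing cache=1 also stores the
# answers in conf.netcache.arp_cache.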
@conf.commands.register
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00",**kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip."""
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip),type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0,**kargs)
return responses is not None
@conf.commands.register
def promiscping(net, timeout=2, fake_bcast="ff:ff:ff:ff:ff:fe", **kargs):
"""Send ARP who-has requests to determine which hosts are in promiscuous mode
promiscping(net, iface=conf.iface)"""
ans,unans = srp(Ether(dst=fake_bcast)/ARP(pdst=net),
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res, name="PROMISCPing")
ans.display()
return ans,unans
class ARP_am(AnsweringMachine):
function_name="farpd"
filter = "arp"
send_function = staticmethod(sendp)
def parse_options(self, IP_addr=None, iface=None, ARP_addr=None):
self.IP_addr=IP_addr
self.iface=iface
self.ARP_addr=ARP_addr
def is_request(self, req):
return (req.haslayer(ARP) and
req.getlayer(ARP).op == 1 and
(self.IP_addr == None or self.IP_addr == req.getlayer(ARP).pdst))
def make_reply(self, req):
ether = req.getlayer(Ether)
arp = req.getlayer(ARP)
iff,a,gw = conf.route.route(arp.psrc)
        if self.iface is not None:
            iff = self.iface
        # fall back to the answering interface's MAC when no ARP_addr was configured
        ARP_addr = self.ARP_addr if self.ARP_addr is not None else get_if_hwaddr(iff)
IP_addr = arp.pdst
resp = Ether(dst=ether.src,
src=ARP_addr)/ARP(op="is-at",
hwsrc=ARP_addr,
psrc=IP_addr,
hwdst=arp.hwsrc,
                                          pdst=arp.psrc)
return resp
def sniff(self):
sniff(iface=self.iface, **self.optsniff)
@conf.commands.register
def etherleak(target, **kargs):
"""Exploit Etherleak flaw"""
return srpflood(Ether()/ARP(pdst=target),
prn=lambda a: conf.padding_layer in a[1] and hexstr(a[1][conf.padding_layer].load),
filter="arp", **kargs)
|
|
import logging
import sys
from kombu.tests.utils import redirect_stdouts
from celery import beat
from celery import platforms
from celery.app import app_or_default
from celery.bin import celerybeat as celerybeat_bin
from celery.apps import beat as beatapp
from celery.utils.compat import defaultdict
from celery.tests.utils import AppCase
class MockedShelveModule(object):
shelves = defaultdict(lambda: {})
def open(self, filename, *args, **kwargs):
return self.shelves[filename]
mocked_shelve = MockedShelveModule()
class MockService(beat.Service):
started = False
in_sync = False
persistence = mocked_shelve
def start(self):
self.__class__.started = True
def sync(self):
self.__class__.in_sync = True
class MockBeat(beatapp.Beat):
running = False
def run(self):
self.__class__.running = True
class MockBeat2(beatapp.Beat):
Service = MockService
def install_sync_handler(self, b):
pass
class MockBeat3(beatapp.Beat):
Service = MockService
def install_sync_handler(self, b):
raise TypeError("xxx")
class test_Beat(AppCase):
def test_loglevel_string(self):
b = beatapp.Beat(loglevel="DEBUG")
self.assertEqual(b.loglevel, logging.DEBUG)
b2 = beatapp.Beat(loglevel=logging.DEBUG)
self.assertEqual(b2.loglevel, logging.DEBUG)
def test_init_loader(self):
b = beatapp.Beat()
b.init_loader()
def test_process_title(self):
b = beatapp.Beat()
b.set_process_title()
def test_run(self):
b = MockBeat2()
MockService.started = False
b.run()
self.assertTrue(MockService.started)
def psig(self, fun, *args, **kwargs):
handlers = {}
def i(sig, handler):
handlers[sig] = handler
p, platforms.install_signal_handler = \
platforms.install_signal_handler, i
try:
fun(*args, **kwargs)
return handlers
finally:
platforms.install_signal_handler = p
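    # Added note (not one of the original tests): psig() temporarily replaces
    # platforms.install_signal_handler with a recorder, so the dict it returns
    # maps signal names such as "SIGINT" to whatever handler fun() registered.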
def test_install_sync_handler(self):
b = beatapp.Beat()
clock = MockService()
MockService.in_sync = False
handlers = self.psig(b.install_sync_handler, clock)
self.assertRaises(SystemExit, handlers["SIGINT"],
"SIGINT", object())
self.assertTrue(MockService.in_sync)
MockService.in_sync = False
def test_setup_logging(self):
b = beatapp.Beat()
b.redirect_stdouts = False
b.setup_logging()
self.assertRaises(AttributeError, getattr, sys.stdout, "logger")
@redirect_stdouts
def test_logs_errors(self, stdout, stderr):
class MockLogger(object):
_critical = []
def debug(self, *args, **kwargs):
pass
def critical(self, msg, *args, **kwargs):
self._critical.append(msg)
logger = MockLogger()
b = MockBeat3(socket_timeout=None)
b.start_scheduler(logger)
self.assertTrue(logger._critical)
@redirect_stdouts
def test_use_pidfile(self, stdout, stderr):
from celery import platforms
class create_pidlock(object):
instance = [None]
def __init__(self, file):
self.file = file
self.instance[0] = self
def acquire(self):
self.acquired = True
class Object(object):
def release(self):
pass
return Object()
prev, platforms.create_pidlock = platforms.create_pidlock, \
create_pidlock
try:
b = MockBeat2(pidfile="pidfilelockfilepid", socket_timeout=None)
b.start_scheduler()
self.assertTrue(create_pidlock.instance[0].acquired)
finally:
platforms.create_pidlock = prev
class MockDaemonContext(object):
opened = False
closed = False
def open(self):
self.__class__.opened = True
def close(self):
self.__class__.closed = True
def create_daemon_context(*args, **kwargs):
context = MockDaemonContext()
return context, context.close
class test_div(AppCase):
def setup(self):
self.prev, beatapp.Beat = beatapp.Beat, MockBeat
self.ctx, celerybeat_bin.create_daemon_context = \
celerybeat_bin.create_daemon_context, create_daemon_context
    def teardown(self):
        beatapp.Beat = self.prev
        celerybeat_bin.create_daemon_context = self.ctx
def test_main(self):
sys.argv = [sys.argv[0], "-s", "foo"]
try:
celerybeat_bin.main()
self.assertTrue(MockBeat.running)
finally:
MockBeat.running = False
def test_detach(self):
cmd = celerybeat_bin.BeatCommand()
cmd.app = app_or_default()
cmd.run(detach=True)
self.assertTrue(MockDaemonContext.opened)
self.assertTrue(MockDaemonContext.closed)
def test_parse_options(self):
cmd = celerybeat_bin.BeatCommand()
cmd.app = app_or_default()
options, args = cmd.parse_options("celerybeat", ["-s", "foo"])
self.assertEqual(options.schedule, "foo")
|
|
from __future__ import (division, print_function)
from pomegranate import *
from nose.tools import with_setup
from nose.tools import assert_equal
import random
import numpy as np
import time
def setup():
'''
Build a model that we want to use to test sequences. This model will
be somewhat complicated, in order to extensively test YAHMM. This will be
a three state global sequence alignment HMM. The HMM models a reference of
'ACT', with pseudocounts to allow for slight deviations from this
reference.
'''
random.seed(0)
global model
global m1, m2, m3
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70 )
model.add_transition( i0, d1, 0.15 )
model.add_transition( i0, m1, 0.15 )
model.add_transition( i1, i1, 0.70 )
model.add_transition( i1, d2, 0.15 )
model.add_transition( i1, m2, 0.15 )
model.add_transition( i2, i2, 0.70 )
model.add_transition( i2, d3, 0.15 )
model.add_transition( i2, m3, 0.15 )
model.add_transition( i3, i3, 0.85 )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transition( d1, d2, 0.15 )
model.add_transition( d1, i1, 0.15 )
model.add_transition( d1, m2, 0.70 )
model.add_transition( d2, d3, 0.15 )
model.add_transition( d2, i2, 0.15 )
model.add_transition( d2, m3, 0.70 )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def teardown():
'''
Remove the model at the end of the unit testing. Since it is stored in a
    global variable, simply delete it.
'''
pass
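# Sketch of how the fixture above can be exercised by hand (added comment, not
# one of the original tests; assumes setup() has run so the global `model` exists):
#     setup()
#     logp, path = model.viterbi(list('ACT'))      # most likely alignment through M1-M3
#     model.log_probability(list('ACT'))           # total log likelihood of 'ACT'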
@with_setup( setup, teardown )
def test_viterbi_train():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
use_pseudocount=True )
assert_equal( round( total_improvement, 4 ), 83.2834 )
@with_setup( setup, teardown )
def test_viterbi_train_no_pseudocount():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
use_pseudocount=False )
assert_equal( round( total_improvement, 4 ), 84.9318 )
@with_setup( setup, teardown )
def test_viterbi_train_w_pseudocount():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
transition_pseudocount=1. )
assert_equal( round( total_improvement, 4 ), 79.4713 )
@with_setup( setup, teardown )
def test_viterbi_train_w_pseudocount_priors():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
transition_pseudocount=0.278,
use_pseudocount=True )
assert_equal( round( total_improvement, 4 ), 81.7439 )
@with_setup( setup, teardown )
def test_viterbi_train_w_inertia():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
edge_inertia=0.193 )
assert_equal( round( total_improvement, 4 ), 80.6241 )
@with_setup( setup, teardown )
def test_viterbi_train_w_inertia2():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
edge_inertia=0.82 )
assert_equal( round( total_improvement, 4 ), 48.0067 )
@with_setup( setup, teardown )
def test_viterbi_train_w_pseudocount_inertia():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='viterbi',
verbose=False,
edge_inertia=0.23,
use_pseudocount=True )
assert_equal( round( total_improvement, 4 ), 77.0155 )
@with_setup( setup, teardown )
def test_bw_train():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
use_pseudocount=True,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 83.1132 )
@with_setup( setup, teardown )
def test_bw_train_no_pseudocount():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
use_pseudocount=False,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 85.681 )
@with_setup( setup, teardown )
def test_bw_train_w_pseudocount():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
transition_pseudocount=0.123,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 84.9408 )
@with_setup( setup, teardown )
def test_bw_train_w_pseudocount_priors():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
transition_pseudocount=0.278,
use_pseudocount=True,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 81.2265 )
@with_setup( setup, teardown )
def test_bw_train_w_inertia():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
edge_inertia=0.193,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 85.0528 )
@with_setup( setup, teardown )
def test_bw_train_w_inertia2():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
edge_inertia=0.82,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 72.5134 )
@with_setup( setup, teardown )
def test_bw_train_w_pseudocount_inertia():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
edge_inertia=0.02,
use_pseudocount=True,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 83.0764 )
@with_setup( setup, teardown )
def test_bw_train_w_frozen_distributions():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
distribution_inertia=1.00,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 64.474 )
@with_setup( setup, teardown )
def test_bw_train_w_frozen_edges():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
edge_inertia=1.00,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 44.0208 )
@with_setup( setup, teardown )
def test_bw_train_w_edge_a_distribution_inertia():
seqs = [ list(x) for x in [ 'ACT', 'ACT', 'ACC', 'ACTC', 'ACT', 'ACT', 'CCT',
'CCC', 'AAT', 'CT', 'AT', 'CT', 'CT', 'CT', 'CT', 'CT', 'CT',
'ACT', 'ACT', 'CT', 'ACT', 'CT', 'CT', 'CT', 'CT' ] ]
total_improvement = model.train( seqs,
algorithm='baum-welch',
verbose=False,
edge_inertia=0.5,
distribution_inertia=0.5,
max_iterations=5 )
assert_equal( round( total_improvement, 4 ), 81.5447 )
|
|
"""
Acquisition infrastructure shared by all modules.
"""
### import ####################################################################
import re
import os
import imp
import copy
import shutil
import pathlib
import time
import traceback
import appdirs
import toml
import numpy as np
import numexpr
from PySide2 import QtCore, QtWidgets
import WrightTools as wt
import pycmds.project.project_globals as g
import pycmds
import pycmds.hardware.spectrometers as spectrometers
import pycmds.hardware.delays as delays
import pycmds.hardware.opas.opas as opas
import pycmds.hardware.filters as filters
from pycmds.somatic._wt5 import create_data, write_data, close_data
all_hardwares = opas.hardwares + spectrometers.hardwares + delays.hardwares + filters.hardwares
from . import constant_resolver
### define ####################################################################
__here__ = pathlib.Path(__file__).parent
### container objects #########################################################
class Axis:
def __init__(self, points, units, name, identity, hardware_dict={}, **kwargs):
self.points = points
self.units = units
self.name = name
self.identity = identity
self.hardware_dict = hardware_dict.copy()
self.__dict__.update(kwargs)
# fill hardware dictionary with defaults
names = re.split("[=F]+", self.identity)
# KFS 2018-12-07: Is this still used at all? replacing wt2 kit.parse_identity
if "F" in self.identity: # last name should be a 'following' in this case
names.pop(-1)
for name in names:
if name[0] == "D":
clean_name = name.replace("D", "", 1)
else:
clean_name = name
if clean_name not in self.hardware_dict.keys():
hardware_object = [h for h in all_hardwares if h.name == clean_name][0]
self.hardware_dict[name] = [hardware_object, "set_position", None]
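# Added note (not in the original source): an identity string such as 'd1=d2'
# names the hardware this axis drives (split on '=' / 'F' above), and a leading
# 'D', e.g. 'Dd1', marks a 'scan about center' axis whose points are later offset
# by per-point centers in Worker.scan.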
class Constant:
def __init__(self, units, name, identity, static=True, expression=""):
self.units = units
self.name = name
self.identity = identity
self.static = static
self.expression = expression
self.hardware = [h for h in all_hardwares if h.name == self.name][0]
class Destinations:
def __init__(self, arr, units, hardware, method, passed_args):
self.arr = arr # full scan shape
self.units = units
self.hardware = hardware
self.method = method
self.passed_args = passed_args
class Order:
def __init__(self, name, path):
self.name = name
self.module = imp.load_source(name, str(path))
self.process = self.module.process
orderers = []
orderers.append(Order("ndindex", __here__ / "order" / "ndindex.py"))
### Worker base ##############################################################
class Worker(QtCore.QObject):
update_ui = QtCore.Signal()
scan_complete = QtCore.Signal()
done = QtCore.Signal()
def __init__(self, aqn_path, queue_worker, finished):
# do not overload this method
QtCore.QObject.__init__(self)
self.aqn_path = aqn_path
self.aqn = wt.kit.INI(self.aqn_path)
self.queue_worker = queue_worker
self.finished = finished
# unpack
self.fraction_complete = self.queue_worker.fraction_complete
self.pause = self.queue_worker.queue_status.pause
self.paused = self.queue_worker.queue_status.paused
self.going = self.queue_worker.queue_status.going
self.stop = self.queue_worker.queue_status.stop
self.stopped = self.queue_worker.queue_status.stopped
# move aqn file into queue folder
ini = wt.kit.INI(aqn_path)
module_name = ini.read("info", "module")
item_name = ini.read("info", "name")
aqn_path = pathlib.Path(aqn_path)
aqn_index_str = str(self.queue_worker.index.read()).zfill(3)
aqn_name = " ".join([aqn_index_str, module_name, item_name]).rstrip()
folder_path = pathlib.Path(self.queue_worker.folder.read()).joinpath(aqn_name)
if aqn_path != folder_path.with_suffix(".aqn"):
shutil.copyfile(aqn_path, folder_path.with_suffix(".aqn"))
if aqn_path.parent == folder_path.parent:
aqn_path.unlink()
self.aqn_path = folder_path.with_suffix(".aqn")
self.aqn = wt.kit.INI(self.aqn_path)
        # create acquisition folder
        self.folder = pathlib.Path(folder_path)
        self.folder.mkdir(exist_ok=True)
# initialize
self.scan_index = None
self.scan_folders = []
self.scan_urls = []
def process(self, scan_folder):
# get path
return # TODO:
data_path = record.data_path.read()
# make data object
data = wt.data.from_PyCMDS(data_path, verbose=False)
data.save(data_path.replace(".data", ".p"), verbose=False)
# make figures for each channel
data_path = pathlib.Path(data_path)
data_folder = data_path.parent
file_name = data_path.stem
file_extension = data_path.suffix
# chop data if over 2D
for channel_index, channel_name in enumerate(data.channel_names):
output_folder = data_folder if data.ndim <= 2 else data_folder / channel_name
output_folder.mkdir(exist_ok=True)
image_fname = channel_name + " " + file_name
if len(data.shape) == 1:
outs = wt.artists.quick1D(
data,
channel=channel_index,
autosave=True,
save_directory=output_folder,
fname=image_fname,
verbose=False,
)
else:
outs = wt.artists.quick2D(
data,
-1,
-2,
channel=channel_index,
autosave=True,
save_directory=output_folder,
fname=image_fname,
verbose=False,
)
# hack in a way to get the first image written
if channel_index == 0:
output_image_path = outs[0]
# upload
self.upload(self.scan_folders[self.scan_index], reference_image=output_image_path)
def scan(
self,
axes,
constants=[],
pre_wait_methods=[],
processing_method="process",
module_reserved="",
multiple_scans=False,
):
# do not overload this method
# scan index ----------------------------------------------------------
if self.scan_index is None:
self.scan_index = 0
else:
self.scan_index += 1
# create destination objects ------------------------------------------
# get destination arrays
if len(axes) == 1:
arrs = [axes[0].points]
else:
arrs = np.meshgrid(*[a.points for a in axes], indexing="ij")
# treat 'scan about center' axes
for axis_index, axis in enumerate(axes):
if axis.identity[0] == "D":
centers = axis.centers
# transpose so own index is first (all others slide down)
transpose_order = list(range(len(axes)))
transpose_order.insert(0, transpose_order.pop(axis_index))
arrs[axis_index] = np.transpose(arrs[axis_index], axes=transpose_order)
# add centers to transposed array
arrs[axis_index] += centers
# transpose out
transpose_order = list(range(len(axes)))
transpose_order.insert(axis_index, transpose_order.pop(0))
arrs[axis_index] = np.transpose(arrs[axis_index], axes=transpose_order)
# create destination objects
destinations_list = []
for i in range(len(axes)):
axis = axes[i]
arr = arrs[i]
for key in axis.hardware_dict.keys():
hardware = axis.hardware_dict[key][0]
method = axis.hardware_dict[key][1]
passed_args = axis.hardware_dict[key][2]
destinations = Destinations(arr, axis.units, hardware, method, passed_args)
destinations_list.append(destinations)
constant_dict = {c.name: c for c in constants}
for constant in constant_resolver.const_order(
**{c.name: c.expression for c in constants}
): # must follow axes
constant = constant_dict[constant]
if constant.static:
pass
else:
# initialize
expression = constant.expression
arr = np.full(arrs[0].shape, np.nan)
units = constant.units
units_kind = wt.units.kind(units)
vals = {}
# populate all hardwares not scanned here
for hardware in all_hardwares:
if wt.units.kind(hardware.units) == units_kind:
vals[hardware.name] = hardware.get_position(units)
for idx in np.ndindex(arrs[0].shape):
for destination in destinations_list:
if wt.units.kind(destination.units) == units_kind:
val = wt.units.converter(
destination.arr[idx], destination.units, units
)
vals[destination.hardware.name] = val
arr[idx] = numexpr.evaluate(expression, vals)
# finish
hardware = constant.hardware
destinations = Destinations(arr, units, hardware, "set_position", None)
destinations_list.insert(0, destinations)
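        # Added illustration (not in the original source): a non-static Constant
        # whose expression is e.g. 'w1 + w2' is evaluated point-by-point with
        # numexpr against the scanned destination arrays (plus current positions
        # of unscanned hardware of the same units kind), and the resulting array
        # is prepended to destinations_list as a set_position destination.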
# check if scan is valid for hardware ---------------------------------
# TODO: !!!
# run through aquisition order handler --------------------------------
order = orderers[0] # TODO: real orderer support
idxs, slices = order.process(destinations_list)
# initialize scan -----------------------------------------------------
g.queue_control.write(True)
self.going.write(True)
self.fraction_complete.write(0.0)
g.logger.log("info", "Scan begun", "")
# put info into headers -----------------------------------------------
# create scan folder
scan_folder = str(self.folder)
self.scan_folders.append(self.folder)
# create scan folder on google drive
if g.google_drive_enabled.read():
scan_url = g.google_drive_control.read().reserve_id(scan_folder)
self.scan_urls.append(g.google_drive_control.read().id_to_open_url(scan_folder))
else:
self.scan_urls.append(None)
# create data
headers = dict()
headers["name"] = self.aqn.read("info", "name")
headers["data info"] = self.aqn.read("info", "info")
headers["data origin"] = self.aqn.read("info", "module")
if g.google_drive_enabled.read():
headers["queue url"] = self.queue_worker.queue_url
headers["acquisition url"] = self.aqn.read("info", "url")
headers["scan url"] = scan_url
path = scan_folder + os.sep + "data.wt5"
create_data(
path, headers, destinations, axes, constants, hardware=all_hardwares, sensors=pycmds.sensors.sensors
)
# acquire -------------------------------------------------------------
self.fraction_complete.write(0.0)
slice_index = 0
npts = float(len(idxs))
for i, idx in enumerate(idxs):
idx = tuple(idx)
# launch hardware
for d in destinations_list:
destination = d.arr[idx]
if d.method == "set_position":
d.hardware.set_position(destination, d.units)
else:
inputs = copy.copy(d.passed_args)
for input_index, input_val in enumerate(inputs):
if input_val == "destination":
inputs[input_index] = destination
elif input_val == "units":
inputs[input_index] = d.units
d.hardware.q.push(d.method, *inputs)
# execute pre_wait_methods
for method in pre_wait_methods:
method()
# slice
if slice_index < len(slices): # takes care of last slice
if slices[slice_index]["index"] == i:
slice_index += 1
# wait for hardware
g.hardware_waits.wait()
# launch sensors
for s in pycmds.sensors.sensors:
s.measure()
# wait for sensors
for s in pycmds.sensors.sensors:
s.wait_until_still()
# save
write_data(idx=idx, hardware=all_hardwares, sensors=pycmds.sensors.sensors)
# update
self.fraction_complete.write(i / npts)
self.update_ui.emit()
# check continue
while self.pause.read():
self.paused.write(True)
self.pause.wait_for_update()
self.paused.write(False)
if self.stop.read():
self.stopped.write(True)
break
# finish scan ---------------------------------------------------------
close_data()
self.fraction_complete.write(1.0)
self.going.write(False)
g.queue_control.write(False)
g.logger.log("info", "Scan done", "")
self.update_ui.emit()
self.scan_complete.emit()
# process scan --------------------------------------------------------
try:
getattr(self, processing_method)(scan_folder)
except BaseException:
# Yeah, yeah, excepting BaseException.... KFS and BJT
# deal with it ---sunglasses--- ---BJT 2018-10-25
traceback.print_exc()
self.upload(scan_folder)
return scan_folder
def upload(self, scan_folder, message="scan complete", reference_image=None):
# create folder on google drive, upload reference image
if g.google_drive_enabled.read():
folder_url = g.google_drive_control.read().id_to_open_url(scan_folder)
g.google_drive_control.read().upload_folder(
path=scan_folder, parent_id=str(pathlib.Path(scan_folder).parent), id=scan_folder,
)
image_url = None
if reference_image is not None:
reference_id = f"{scan_folder} reference"
g.google_drive_control.read().reserve_id(reference_id)
image_url = g.google_drive_control.read().id_to_download_url(reference_id)
g.google_drive_control.read().create_file(
path=reference_image, parent_id=scan_folder, id=reference_id
)
else:
folder_url = image_url = None
# send message on slack
if g.slack_enabled.read():
if g.google_drive_enabled.read() and reference_image is not None:
start = time.time()
while time.time() - start < 10 and not g.google_drive_control.read().is_uploaded(
reference_id
):
time.sleep(0.01)
slack = g.slack_control.read()
field = {}
field["title"] = pathlib.Path(scan_folder).name
field["title_link"] = folder_url
field["image_url"] = image_url
message = ":tada: scan complete - {} elapsed".format(
g.progress_bar.time_elapsed.text()
)
slack.send_message(message, attachments=[field])
### GUI base ##################################################################
class GUI(QtCore.QObject):
"""Acquisition module gui."""
def __init__(self, module_name):
QtCore.QObject.__init__(self)
self.module_name = module_name
self.state_path = (
pathlib.Path(appdirs.user_data_dir("pycmds", "pycmds"))
/ "modules"
/ f"{self.module_name.lower()}.toml"
)
self.state_path.parent.mkdir(parents=True, exist_ok=True)
self.state_path.touch(exist_ok=True)
self.state = toml.load(self.state_path)
# create frame
self.layout = QtWidgets.QVBoxLayout()
self.layout.setMargin(0)
self.layout.setContentsMargins(0, 0, 0, 0)
self.create_frame() # add module-specific widgets to out layout
# device widget
# self.device_widget = record.Widget()
# self.layout.addWidget(self.device_widget)
# finish
self.frame = QtWidgets.QWidget()
self.frame.setLayout(self.layout)
def create_frame(self):
layout = QtWidgets.QVBoxLayout()
layout.setMargin(5)
# scan widget
layout.addWidget(self.scan.widget)
# finish
layout.addStretch(1)
self.frame = QtWidgets.QWidget()
self.frame.setLayout(layout)
g.module_widget.add_child(self.frame)
        g.module_combobox.add_module(self.module_name, self.show_frame)
def hide(self):
self.frame.hide()
def on_sensor_settings_updated(self):
# overload this if your gui has device-dependent settings
pass
def save_state(self):
with open(self.state_path, "w") as f:
f.write(toml.dumps(self.state))
def show(self):
self.frame.show()
def update(self):
pass
|
|
# This file is part of Androguard.
#
# Copyright (C) 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file is a simplified version of writer.py that outputs an AST instead of source code."""
import struct
from androguard.decompiler.dad import basic_blocks, instruction, opcode_ins
from androguard.core.bytecodes.dvm_types import TYPE_DESCRIPTOR
def array_access(arr, ind):
return ['ArrayAccess', [arr, ind]]
def array_creation(tn, params, dim):
return ['ArrayCreation', [tn] + params, dim]
def array_initializer(params, tn=None):
return ['ArrayInitializer', params, tn]
def assignment(lhs, rhs, op=''):
return ['Assignment', [lhs, rhs], op]
def binary_infix(op, left, right):
return ['BinaryInfix', [left, right], op]
def cast(tn, arg):
return ['Cast', [tn, arg]]
def field_access(triple, left):
return ['FieldAccess', [left], triple]
def literal(result, tt):
return ['Literal', result, tt]
def local(name):
return ['Local', name]
def method_invocation(triple, name, base, params):
if base is None:
return ['MethodInvocation', params, triple, name, False]
return ['MethodInvocation', [base] + params, triple, name, True]
def parenthesis(expr):
return ['Parenthesis', [expr]]
def typen(baset, dim):
return ['TypeName', (baset, dim)]
def unary_prefix(op, left):
return ['Unary', [left], op, False]
def unary_postfix(left, op):
return ['Unary', [left], op, True]
def var_decl(typen, var):
return [typen, var]
def dummy(*args):
return ['Dummy', args]
################################################################################
def expression_stmt(expr):
return ['ExpressionStatement', expr]
def local_decl_stmt(expr, decl):
return ['LocalDeclarationStatement', expr, decl]
def return_stmt(expr):
return ['ReturnStatement', expr]
def throw_stmt(expr):
return ['ThrowStatement', expr]
def jump_stmt(keyword):
return ['JumpStatement', keyword, None]
def loop_stmt(isdo, cond_expr, body):
type_ = 'DoStatement' if isdo else 'WhileStatement'
return [type_, None, cond_expr, body]
def try_stmt(tryb, pairs):
return ['TryStatement', None, tryb, pairs]
def if_stmt(cond_expr, scopes):
return ['IfStatement', None, cond_expr, scopes]
def switch_stmt(cond_expr, ksv_pairs):
return ['SwitchStatement', None, cond_expr, ksv_pairs]
# Create empty statement block (statements to be appended later)
# Note, the code below assumes this can be modified in place
def statement_block():
return ['BlockStatement', None, []]
# Add a statement to the end of a statement block
def _append(sb, stmt):
assert (sb[0] == 'BlockStatement')
if stmt is not None:
sb[2].append(stmt)
def parse_descriptor(desc):
dim = 0
while desc and desc[0] == '[':
desc = desc[1:]
dim += 1
if desc in TYPE_DESCRIPTOR:
return typen('.' + TYPE_DESCRIPTOR[desc], dim)
if desc and desc[0] == 'L' and desc[-1] == ';':
return typen(desc[1:-1], dim)
# invalid descriptor (probably None)
return dummy(str(desc))
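# Worked examples for parse_descriptor (added comment, not in the original file),
# assuming the usual TYPE_DESCRIPTOR mapping ('I' -> 'int', etc.):
#     parse_descriptor('I')                  -> ['TypeName', ('.int', 0)]
#     parse_descriptor('[[I')                -> ['TypeName', ('.int', 2)]
#     parse_descriptor('Ljava/lang/String;') -> ['TypeName', ('java/lang/String', 0)]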
# Note: the literal_foo functions (and dummy) are also imported by decompile.py
def literal_string(s):
return literal(str(s), ('java/lang/String', 0))
def literal_class(desc):
return literal(parse_descriptor(desc), ('java/lang/Class', 0))
def literal_bool(b):
return literal(str(b).lower(), ('.boolean', 0))
def literal_int(b):
return literal(str(b), ('.int', 0))
def literal_hex_int(b):
return literal(hex(b), ('.int', 0))
def literal_long(b):
return literal(str(b) + 'L', ('.long', 0))
def literal_float(f):
return literal(str(f) + 'f', ('.float', 0))
def literal_double(f):
return literal(str(f), ('.double', 0))
def literal_null():
return literal('null', ('.null', 0))
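# Added examples of the literal node shapes produced above (comment only, not in
# the original file):
#     literal_int(5)     -> ['Literal', '5', ('.int', 0)]
#     literal_long(5)    -> ['Literal', '5L', ('.long', 0)]
#     literal_bool(True) -> ['Literal', 'true', ('.boolean', 0)]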
def visit_decl(var, init_expr=None):
t = parse_descriptor(var.get_type())
v = local('v{}'.format(var.name))
return local_decl_stmt(init_expr, var_decl(t, v))
def visit_arr_data(value):
data = value.get_data()
tab = []
elem_size = value.element_width
if elem_size == 4:
for i in range(0, value.size * 4, 4):
tab.append(struct.unpack('<i', data[i:i + 4])[0])
else: # FIXME: other cases
for i in range(value.size):
tab.append(data[i])
return array_initializer(list(map(literal_int, tab)))
def write_inplace_if_possible(lhs, rhs):
if isinstance(rhs, instruction.BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
exp_rhs = rhs.var_map[rhs.arg2]
# post increment/decrement
if rhs.op in '+-' and isinstance(exp_rhs, instruction.Constant) and exp_rhs.get_int_value() == 1:
return unary_postfix(visit_expr(lhs), rhs.op * 2)
# compound assignment
return assignment(visit_expr(lhs), visit_expr(exp_rhs), op=rhs.op)
return assignment(visit_expr(lhs), visit_expr(rhs))
def visit_expr(op):
if isinstance(op, instruction.ArrayLengthExpression):
expr = visit_expr(op.var_map[op.array])
return field_access([None, 'length', None], expr)
if isinstance(op, instruction.ArrayLoadExpression):
array_expr = visit_expr(op.var_map[op.array])
index_expr = visit_expr(op.var_map[op.idx])
return array_access(array_expr, index_expr)
if isinstance(op, instruction.ArrayStoreInstruction):
array_expr = visit_expr(op.var_map[op.array])
index_expr = visit_expr(op.var_map[op.index])
rhs = visit_expr(op.var_map[op.rhs])
return assignment(array_access(array_expr, index_expr), rhs)
if isinstance(op, instruction.AssignExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.rhs
if lhs is None:
return visit_expr(rhs)
return write_inplace_if_possible(lhs, rhs)
if isinstance(op, instruction.BaseClass):
if op.clsdesc is None:
assert (op.cls == "super")
return local(op.cls)
return parse_descriptor(op.clsdesc)
if isinstance(op, instruction.BinaryExpression):
lhs = op.var_map.get(op.arg1)
rhs = op.var_map.get(op.arg2)
expr = binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
if not isinstance(op, instruction.BinaryCompExpression):
expr = parenthesis(expr)
return expr
if isinstance(op, instruction.CheckCastExpression):
lhs = op.var_map.get(op.arg)
return parenthesis(cast(parse_descriptor(op.clsdesc), visit_expr(lhs)))
if isinstance(op, instruction.ConditionalExpression):
lhs = op.var_map.get(op.arg1)
rhs = op.var_map.get(op.arg2)
return binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
if isinstance(op, instruction.ConditionalZExpression):
arg = op.var_map[op.arg]
if isinstance(arg, instruction.BinaryCompExpression):
arg.op = op.op
return visit_expr(arg)
expr = visit_expr(arg)
atype = str(arg.get_type())
if atype == 'Z':
if op.op == opcode_ins.Op.EQUAL:
expr = unary_prefix('!', expr)
elif atype in 'VBSCIJFD':
expr = binary_infix(op.op, expr, literal_int(0))
else:
expr = binary_infix(op.op, expr, literal_null())
return expr
if isinstance(op, instruction.Constant):
if op.type == 'Ljava/lang/String;':
return literal_string(op.cst)
elif op.type == 'Z':
return literal_bool(op.cst == 0)
elif op.type in 'ISCB':
return literal_int(op.cst2)
elif op.type in 'J':
return literal_long(op.cst2)
elif op.type in 'F':
return literal_float(op.cst)
elif op.type in 'D':
return literal_double(op.cst)
elif op.type == 'Ljava/lang/Class;':
return literal_class(op.clsdesc)
return dummy('??? Unexpected constant: ' + str(op.type))
if isinstance(op, instruction.FillArrayExpression):
array_expr = visit_expr(op.var_map[op.reg])
rhs = visit_arr_data(op.value)
return assignment(array_expr, rhs)
if isinstance(op, instruction.FilledArrayExpression):
tn = parse_descriptor(op.type)
params = [visit_expr(op.var_map[x]) for x in op.args]
return array_initializer(params, tn)
if isinstance(op, instruction.InstanceExpression):
triple = op.clsdesc[1:-1], op.name, op.ftype
expr = visit_expr(op.var_map[op.arg])
return field_access(triple, expr)
if isinstance(op, instruction.InstanceInstruction):
triple = op.clsdesc[1:-1], op.name, op.atype
lhs = field_access(triple, visit_expr(op.var_map[op.lhs]))
rhs = visit_expr(op.var_map[op.rhs])
return assignment(lhs, rhs)
if isinstance(op, instruction.InvokeInstruction):
base = op.var_map[op.base]
params = [op.var_map[arg] for arg in op.args]
params = list(map(visit_expr, params))
if op.name == '<init>':
if isinstance(base, instruction.ThisParam):
keyword = 'this' if base.type[1:-1] == op.triple[0] else 'super'
return method_invocation(op.triple, keyword, None, params)
elif isinstance(base, instruction.NewInstance):
return ['ClassInstanceCreation', op.triple, params,
parse_descriptor(base.type)]
else:
assert (isinstance(base, instruction.Variable))
# fallthrough to create dummy <init> call
return method_invocation(op.triple, op.name, visit_expr(base), params)
# for unmatched monitor instructions, just create dummy expressions
if isinstance(op, instruction.MonitorEnterExpression):
return dummy("monitor enter(", visit_expr(op.var_map[op.ref]), ")")
if isinstance(op, instruction.MonitorExitExpression):
return dummy("monitor exit(", visit_expr(op.var_map[op.ref]), ")")
if isinstance(op, instruction.MoveExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.var_map.get(op.rhs)
return write_inplace_if_possible(lhs, rhs)
if isinstance(op, instruction.MoveResultExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.var_map.get(op.rhs)
return assignment(visit_expr(lhs), visit_expr(rhs))
if isinstance(op, instruction.NewArrayExpression):
tn = parse_descriptor(op.type[1:])
expr = visit_expr(op.var_map[op.size])
return array_creation(tn, [expr], 1)
# create dummy expression for unmatched newinstance
if isinstance(op, instruction.NewInstance):
return dummy("new ", parse_descriptor(op.type))
if isinstance(op, instruction.Param):
if isinstance(op, instruction.ThisParam):
return local('this')
return local('p{}'.format(op.v))
if isinstance(op, instruction.StaticExpression):
triple = op.clsdesc[1:-1], op.name, op.ftype
return field_access(triple, parse_descriptor(op.clsdesc))
if isinstance(op, instruction.StaticInstruction):
triple = op.clsdesc[1:-1], op.name, op.ftype
lhs = field_access(triple, parse_descriptor(op.clsdesc))
rhs = visit_expr(op.var_map[op.rhs])
return assignment(lhs, rhs)
if isinstance(op, instruction.SwitchExpression):
return visit_expr(op.var_map[op.src])
if isinstance(op, instruction.UnaryExpression):
lhs = op.var_map.get(op.arg)
if isinstance(op, instruction.CastExpression):
expr = cast(parse_descriptor(op.clsdesc), visit_expr(lhs))
else:
expr = unary_prefix(op.op, visit_expr(lhs))
return parenthesis(expr)
if isinstance(op, instruction.Variable):
# assert(op.declared)
return local('v{}'.format(op.name))
return dummy('??? Unexpected op: ' + type(op).__name__)
def visit_ins(op, isCtor=False):
if isinstance(op, instruction.ReturnInstruction):
expr = None if op.arg is None else visit_expr(op.var_map[op.arg])
return return_stmt(expr)
elif isinstance(op, instruction.ThrowExpression):
return throw_stmt(visit_expr(op.var_map[op.ref]))
elif isinstance(op, instruction.NopExpression):
return None
# Local var decl statements
if isinstance(op, (instruction.AssignExpression, instruction.MoveExpression,
instruction.MoveResultExpression)):
lhs = op.var_map.get(op.lhs)
rhs = op.rhs if isinstance(
op, instruction.AssignExpression) else op.var_map.get(op.rhs)
if isinstance(lhs, instruction.Variable) and not lhs.declared:
lhs.declared = True
expr = visit_expr(rhs)
return visit_decl(lhs, expr)
# skip this() at top of constructors
if isCtor and isinstance(op, instruction.AssignExpression):
op2 = op.rhs
if op.lhs is None and isinstance(op2, instruction.InvokeInstruction):
if op2.name == '<init>' and len(op2.args) == 0:
if isinstance(op2.var_map[op2.base], instruction.ThisParam):
return None
# MoveExpression is skipped when lhs = rhs
if isinstance(op, instruction.MoveExpression):
if op.var_map.get(op.lhs) is op.var_map.get(op.rhs):
return None
return expression_stmt(visit_expr(op))
class JSONWriter:
def __init__(self, graph, method):
self.graph = graph
self.method = method
self.visited_nodes = set()
self.loop_follow = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.latch_node = [None]
self.try_follow = [None]
self.next_case = None
self.need_break = True
self.constructor = False
self.context = []
    # This class is used as a context manager so that it can be written as
    #     with self as foo:
    #         ...
    # which pushes a statement block onto the context stack and binds it to foo;
    # within the with block, every instruction that is added ends up in foo.
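    # A hedged sketch of the pattern as the visit_* methods below use it
    # (names are the ones from this class; loop_stmt/statement_block come from
    # the same module):
    #
    #     with self as body:                    # pushes a fresh statement_block()
    #         self.visit_node(loop.true)        # visit_* calls self.add(...) into body
    #     self.add(loop_stmt(isDo, cond_expr, body))
    #
    # Nested with-blocks therefore build nested statement blocks bottom-up.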
def __enter__(self):
self.context.append(statement_block())
return self.context[-1]
def __exit__(self, *args):
self.context.pop()
return False
# Add a statement to the current context
def add(self, val):
_append(self.context[-1], val)
def visit_ins(self, op):
self.add(visit_ins(op, isCtor=self.constructor))
# Note: this is a mutating operation
def get_ast(self):
m = self.method
flags = m.access
if 'constructor' in flags:
flags.remove('constructor')
self.constructor = True
params = m.lparams[:]
if 'static' not in m.access:
params = params[1:]
# DAD doesn't create any params for abstract methods
if len(params) != len(m.params_type):
assert ('abstract' in flags or 'native' in flags)
assert (not params)
params = list(range(len(m.params_type)))
paramdecls = []
for ptype, name in zip(m.params_type, params):
t = parse_descriptor(ptype)
v = local('p{}'.format(name))
paramdecls.append(var_decl(t, v))
if self.graph is None:
body = None
else:
with self as body:
self.visit_node(self.graph.entry)
return {
'triple': m.triple,
'flags': flags,
'ret': parse_descriptor(m.type),
'params': paramdecls,
'comments': [],
'body': body,
}
def _visit_condition(self, cond):
if cond.isnot:
cond.cond1.neg()
left = parenthesis(self.get_cond(cond.cond1))
right = parenthesis(self.get_cond(cond.cond2))
op = '&&' if cond.isand else '||'
res = binary_infix(op, left, right)
return res
def get_cond(self, node):
if isinstance(node, basic_blocks.ShortCircuitBlock):
return self._visit_condition(node.cond)
elif isinstance(node, basic_blocks.LoopBlock):
return self.get_cond(node.cond)
else:
assert (type(node) == basic_blocks.CondBlock)
assert (len(node.ins) == 1)
return visit_expr(node.ins[-1])
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1],
self.try_follow[-1]):
return
if not node.type.is_return and node in self.visited_nodes:
return
self.visited_nodes.add(node)
for var in node.var_to_declare:
if not var.declared:
self.add(visit_decl(var))
var.declared = True
node.visit(self)
def visit_loop_node(self, loop):
isDo = cond_expr = body = None
follow = loop.follow['loop']
if loop.looptype.is_pretest:
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
isDo = False
cond_expr = self.get_cond(loop)
elif loop.looptype.is_posttest:
isDo = True
self.latch_node.append(loop.latch)
elif loop.looptype.is_endless:
isDo = False
cond_expr = literal_bool(True)
with self as body:
self.loop_follow.append(follow)
if loop.looptype.is_pretest:
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
if loop.looptype.is_pretest:
pass
elif loop.looptype.is_posttest:
self.latch_node.pop()
cond_expr = self.get_cond(loop.latch)
else:
self.visit_node(loop.latch)
assert (cond_expr is not None and isDo is not None)
self.add(loop_stmt(isDo, cond_expr, body))
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
cond_expr = None
scopes = []
follow = cond.follow['if']
if cond.false is cond.true:
self.add(expression_stmt(self.get_cond(cond)))
self.visit_node(cond.true)
return
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
if self.loop_follow[-1] in (cond.true, cond.false):
cond_expr = self.get_cond(cond)
with self as scope:
self.add(jump_stmt('break'))
scopes.append(scope)
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.add(if_stmt(cond_expr, scopes))
elif follow is not None:
if cond.true in (follow, self.next_case) or \
cond.num > cond.true.num:
# or cond.true.num > cond.false.num:
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if cond.true: # in self.visited_nodes:
cond_expr = self.get_cond(cond)
with self as scope:
self.visit_node(cond.true)
scopes.append(scope)
is_else = not (follow in (cond.true, cond.false))
if is_else and cond.false not in self.visited_nodes:
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.if_follow.pop()
self.add(if_stmt(cond_expr, scopes))
self.visit_node(follow)
else:
cond_expr = self.get_cond(cond)
with self as scope:
self.visit_node(cond.true)
scopes.append(scope)
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.add(if_stmt(cond_expr, scopes))
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
cond_expr = visit_expr(switch_ins)
ksv_pairs = []
follow = switch.follow['switch']
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
cur_ks = switch.node_to_case[node][:]
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
cur_ks.append(None)
default = None
with self as body:
self.visit_node(node)
if self.need_break:
self.add(jump_stmt('break'))
else:
self.need_break = True
ksv_pairs.append((cur_ks, body))
if default not in (None, follow):
with self as body:
self.visit_node(default)
ksv_pairs.append(([None], body))
self.add(switch_stmt(cond_expr, ksv_pairs))
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 1:
if sucs[0] is self.loop_follow[-1]:
self.add(jump_stmt('break'))
elif sucs[0] is self.next_case:
self.need_break = False
else:
self.visit_node(sucs[0])
def visit_try_node(self, try_node):
with self as tryb:
self.try_follow.append(try_node.follow)
self.visit_node(try_node.try_start)
pairs = []
for catch_node in try_node.catch:
if catch_node.exception_ins:
ins = catch_node.exception_ins
assert (isinstance(ins, instruction.MoveExceptionExpression))
var = ins.var_map[ins.ref]
var.declared = True
ctype = var.get_type()
name = 'v{}'.format(var.name)
else:
ctype = catch_node.catch_type
name = '_'
catch_decl = var_decl(parse_descriptor(ctype), local(name))
with self as body:
self.visit_node(catch_node.catch_start)
pairs.append((catch_decl, body))
self.add(try_stmt(tryb, pairs))
self.visit_node(self.try_follow.pop())
def visit_return_node(self, ret):
self.need_break = False
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
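# A hedged usage sketch (not part of the original module): `graph` and `method`
# are assumed to come from the surrounding decompiler (a structured method
# graph and its method object). get_ast() walks the graph and returns a plain,
# JSON-serialisable dict; note that it mutates declaration flags while visiting.
#
#     writer = JSONWriter(graph, method)
#     ast = writer.get_ast()
#     print(json.dumps(ast, indent=2))   # assumes `import json`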
|
|
from __future__ import absolute_import, division, print_function
from collections import OrderedDict, Iterator
import copy
from functools import partial
from hashlib import md5
import inspect
import pickle
import os
import uuid
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from . import sharedict
from .compatibility import bind_method, unicode, PY3
from .context import _globals
from .core import flatten
from .utils import Dispatch, ensure_dict
from .sharedict import ShareDict
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def persist(self, **kwargs):
""" Persist this dask collection into memory
See ``dask.base.persist`` for full docstring
"""
(result,) = persist(self, **kwargs)
return result
def compute(self, **kwargs):
""" Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask.array turns into a NumPy array and a Dask.dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
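# A hedged sketch (hypothetical collection, not part of dask): the minimal
# protocol a Base subclass is expected to provide so that compute()/persist()
# below can drive it. `threaded_get` stands in for whichever scheduler `get`
# the collection defaults to.
#
#     class MyCollection(Base):
#         _default_get = staticmethod(threaded_get)
#         _optimize = staticmethod(dont_optimize)
#
#         def __init__(self, dask, key):
#             self.dask, self.key = dask, key
#
#         def _keys(self):
#             return [self.key]
#
#         @staticmethod
#         def _finalize(results):
#             return results[0]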
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> compute({'a': a, 'b': b, 'c': 1}) # doctest: +SKIP
({'a': 45, 'b': 4.5, 'c': 1},)
"""
from dask.delayed import delayed
traverse = kwargs.pop('traverse', True)
if traverse:
args = tuple(delayed(a)
if isinstance(a, (list, set, tuple, dict, Iterator))
else a for a in args)
optimize_graph = kwargs.pop('optimize_graph', True)
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = collections_to_dsk(variables, optimize_graph, **kwargs)
keys = [var._keys() for var in variables]
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([optimization_function(arg)(ensure_dict(arg.dask), arg._keys())
for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
function_cache = {}
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
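# A hedged sketch: structurally equal callables normalize to equal tokens,
# which is what keeps ``tokenize`` deterministic for partials and compositions.
#
#     from functools import partial
#     normalize_function(partial(max, key=len)) == \
#         normalize_function(partial(max, key=len))   # -> True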
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice, complex),
identity)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@normalize_token.register(object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@normalize_token.register(Base)
def normalize_base(b):
return type(b).__name__, b.key
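# A hedged sketch (``Point`` is a hypothetical user class): registering a
# normalizer makes ``tokenize`` deterministic for custom types instead of
# falling back to the uuid-based ``normalize_object`` above.
#
#     @normalize_token.register(Point)
#     def normalize_point(p):
#         return ('Point', p.x, p.y)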
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
                offset = 0  # root memmaps have an mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([unicode(item).encode('utf-8') for item in
x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return 'np.' + name
except:
return normalize_function(x)
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
def dont_optimize(dsk, keys):
return dsk
def optimization_function(obj):
if isinstance(obj, type):
cls = obj
else:
cls = type(obj)
name = cls.__name__.lower() + '_optimize' # dask.set_options(array_optimize=foo)
if name in _globals:
return _globals[name] or dont_optimize
try:
return cls._optimize
except AttributeError:
return dont_optimize
def collections_to_dsk(collections, optimize_graph=True, **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
optimizations = (kwargs.pop('optimizations', None) or
_globals.get('optimizations', []))
if optimize_graph:
groups = groupby(optimization_function, collections)
groups = {opt: _extract_graph_and_keys(val)
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: [opt(ensure_dict(dsk), keys), keys]
for k, (dsk, keys) in groups.items()}
dsk = merge([opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()])
else:
dsk = ensure_dict(sharedict.merge(*[c.dask for c in collections]))
return dsk
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
    that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
dsk = {}
keys = []
for v in vals:
d = v.dask
if type(d) is ShareDict:
for dd in d.dicts.values():
dsk.update(dd)
else:
dsk.update(v.dask)
keys.append(v._keys())
return dsk, keys
def redict_collection(c, dsk):
cc = copy.copy(c)
cc.dask = dsk
return cc
def persist(*args, **kwargs):
""" Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small NumPy
arrays (in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler ``get`` function.
Returns
-------
New dask collections backed by in-memory data
"""
collections = [a for a in args if isinstance(a, Base)]
if not collections:
return args
get = kwargs.pop('get', None) or _globals['get']
if inspect.ismethod(get):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == _globals['get']:
collections = client.persist(collections, **kwargs)
if isinstance(collections, list): # distributed is inconsistent here
collections = tuple(collections)
else:
collections = (collections,)
results_iter = iter(collections)
return tuple(a if not isinstance(a, Base)
else next(results_iter)
for a in args)
optimize_graph = kwargs.pop('optimize_graph', True)
if not get:
get = collections[0]._default_get
if not all(a._default_get == get for a in collections):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys = list(flatten([var._keys() for var in collections]))
results = get(dsk, keys, **kwargs)
d = dict(zip(keys, results))
result = [redict_collection(c, {k: d[k]
for k in flatten(c._keys())})
for c in collections]
results_iter = iter(result)
return tuple(a if not isinstance(a, Base)
else next(results_iter)
for a in args)
if PY3:
Base.persist.__doc__ = persist.__doc__
|
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or a TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
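    # A hedged sketch (hypothetical extractor, not part of this module): the
    # usual shape of a subclass -- a _VALID_URL with an ``id`` group plus a
    # _real_extract() that builds the info dict described in the class
    # docstring above.
    #
    #     class ExampleIE(InfoExtractor):
    #         _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #
    #         def _real_extract(self, url):
    #             video_id = self._match_id(url)
    #             webpage = self._download_webpage(url, video_id)
    #             return {
    #                 'id': video_id,
    #                 'title': self._og_search_title(webpage),
    #                 'url': self._og_search_video_url(webpage),
    #             }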
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
        Perform a regex search on the given string, using a single pattern or a
        list of patterns, and return the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return None
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
self._meta_regex(name),
html, display_name, fatal=fatal, group='content', **kwargs)
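    # A hedged sketch: given ``<meta property="og:title" content="My title">``
    # in the page, ``self._og_search_title(webpage)`` returns 'My title';
    # ``self._html_search_meta('description', webpage)`` works the same way
    # for plain ``<meta name=... content=...>`` tags.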
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
@staticmethod
def _hidden_inputs(html):
return dict([
(input.group('name'), input.group('value')) for input in re.finditer(
r'''(?x)
<input\s+
type=(?P<q_hidden>["\'])hidden(?P=q_hidden)\s+
name=(?P<q_name>["\'])(?P<name>.+?)(?P=q_name)\s+
(?:id=(?P<q_id>["\']).+?(?P=q_id)\s+)?
value=(?P<q_value>["\'])(?P<value>.*?)(?P=q_value)
''', html)
])
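    # A hedged sketch: for a page containing
    #     <input type="hidden" name="token" value="abc123">
    # ``_hidden_inputs(html)`` returns {'token': 'abc123'}; _form_hidden_inputs()
    # below first narrows the HTML down to the matching <form id=...> block.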
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
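    # A hedged usage sketch: extractors typically call this just before
    # returning, either with the default ordering or with an explicit field
    # preference:
    #
    #     self._sort_formats(formats)
    #     self._sort_formats(formats, field_preference=('height', 'tbr'))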
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip()):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source)
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
'url': manifest_url,
'ext': 'flv',
'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
'preference': preference,
})
self._sort_formats(formats)
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True):
formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if m3u8_doc is False:
return m3u8_doc
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: it looks like the video codec does not always come first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
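# Illustrative sketch of the parsing above (the playlist excerpt is made up,
# not taken from a real site). A variant entry such as
#   #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
#   http://example.com/hls/720p.m3u8
# would, assuming m3u8_id='hls', roughly produce:
#   {'format_id': 'hls-1280', 'url': 'http://example.com/hls/720p.m3u8',
#    'tbr': 1280, 'width': 1280, 'height': 720, 'vcodec': 'avc1',
#    'acodec': 'mp4a', 'ext': ext, 'protocol': entry_protocol,
#    'preference': preference}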
# TODO: improve extraction
def _extract_smil_formats(self, smil_url, video_id, fatal=True):
smil = self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
if smil is False:
assert not fatal
return []
base = smil.find('./head/meta').get('base')
formats = []
rtmp_count = 0
if smil.findall('./body/seq/video'):
video = smil.findall('./body/seq/video')[0]
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
else:
for video in smil.findall('./body/switch/video'):
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
self._sort_formats(formats)
return formats
def _parse_smil_video(self, video, video_id, base, rtmp_count):
src = video.get('src')
if not src:
return [], rtmp_count
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
return self._extract_m3u8_formats(src, video_id, ext), rtmp_count
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
return ([{
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)
elif proto and proto.startswith('http'):
return ([{
'url': base + src,
'ext': ext or 'flv',
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)
# unrecognised or missing protocol: return no formats so callers can still unpack
return [], rtmp_count
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = compat_urllib_request.Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
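# A minimal sketch of how SearchInfoExtractor is meant to be subclassed
# (purely illustrative; ExampleSearchIE and example.com are made up and are
# not part of this module). A concrete extractor only declares
# _SEARCH_KEY/_MAX_RESULTS and implements _get_n_results(); _real_extract()
# above then handles queries such as 'examplesearch5:kittens' or
# 'examplesearchall:kittens'.
class ExampleSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'examplesearch'
    _MAX_RESULTS = 50
    def _get_n_results(self, query, n):
        # A real extractor would query the site here; this sketch just
        # fabricates placeholder entries to show the expected return shape.
        entries = [
            self.url_result('http://example.com/video/%d' % i)
            for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)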
|
|
"""
topology.models -- for Schedconfig and other topology-related objects
"""
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from core.settings.config import DB_SCHEMA_PANDA
# Create your models here.
class Schedconfig(models.Model):
name = models.CharField(max_length=180, db_column='NAME')
nickname = models.CharField(max_length=180, primary_key=True, db_column='NICKNAME')
queue = models.CharField(max_length=180, db_column='QUEUE', blank=True)
localqueue = models.CharField(max_length=60, db_column='LOCALQUEUE', blank=True)
system = models.CharField(max_length=180, db_column='SYSTEM')
sysconfig = models.CharField(max_length=60, db_column='SYSCONFIG', blank=True)
environ = models.CharField(max_length=750, db_column='ENVIRON', blank=True)
gatekeeper = models.CharField(max_length=120, db_column='GATEKEEPER', blank=True)
jobmanager = models.CharField(max_length=240, db_column='JOBMANAGER', blank=True)
ddm = models.CharField(max_length=360, db_column='DDM', blank=True)
jdladd = models.CharField(max_length=1500, db_column='JDLADD', blank=True)
globusadd = models.CharField(max_length=300, db_column='GLOBUSADD', blank=True)
jdl = models.CharField(max_length=180, db_column='JDL', blank=True)
jdltxt = models.CharField(max_length=1500, db_column='JDLTXT', blank=True)
version = models.CharField(max_length=180, db_column='VERSION', blank=True)
site = models.CharField(max_length=180, db_column='site')
region = models.CharField(max_length=180, db_column='REGION', blank=True)
gstat = models.CharField(max_length=180, db_column='GSTAT', blank=True)
tags = models.CharField(max_length=600, db_column='TAGS', blank=True)
cmd = models.CharField(max_length=600, db_column='CMD', blank=True)
lastmod = models.DateTimeField(db_column='LASTMOD')
errinfo = models.CharField(max_length=240, db_column='ERRINFO', blank=True)
nqueue = models.IntegerField(db_column='NQUEUE')
comment_field = models.CharField(max_length=1500, db_column='comment_', blank=True) # Field renamed because it was a Python reserved word.
appdir = models.CharField(max_length=1500, db_column='APPDIR', blank=True)
datadir = models.CharField(max_length=240, db_column='DATADIR', blank=True)
tmpdir = models.CharField(max_length=240, db_column='TMPDIR', blank=True)
wntmpdir = models.CharField(max_length=240, db_column='WNTMPDIR', blank=True)
dq2url = models.CharField(max_length=240, db_column='DQ2URL', blank=True)
special_par = models.CharField(max_length=240, db_column='SPECIAL_PAR', blank=True)
python_path = models.CharField(max_length=240, db_column='PYTHON_PATH', blank=True)
nodes = models.IntegerField(db_column='NODES')
status = models.CharField(max_length=30, db_column='status', blank=True)
copytool = models.CharField(max_length=240, db_column='COPYTOOL', blank=True)
releases = models.CharField(max_length=1500, db_column='RELEASES', blank=True)
envsetup = models.CharField(max_length=600, db_column='ENVSETUP', blank=True)
lfcpath = models.CharField(max_length=240, db_column='LFCPATH', blank=True)
lfchost = models.CharField(max_length=240, db_column='LFCHOST', blank=True)
cloud = models.CharField(max_length=180, db_column='cloud', blank=True)
siteid = models.CharField(max_length=180, db_column='siteid', blank=True)
proxy = models.CharField(max_length=240, db_column='PROXY', blank=True)
retry = models.CharField(max_length=30, db_column='RETRY', blank=True)
queuehours = models.IntegerField(db_column='QUEUEHOURS')
envsetupin = models.CharField(max_length=600, db_column='ENVSETUPIN', blank=True)
lfcprodpath = models.CharField(max_length=240, db_column='LFCPRODPATH', blank=True)
recoverdir = models.CharField(max_length=240, db_column='RECOVERDIR', blank=True)
memory = models.IntegerField(db_column='MEMORY')
maxtime = models.IntegerField(db_column='MAXTIME')
space = models.IntegerField(db_column='SPACE')
tspace = models.DateTimeField(db_column='TSPACE')
cmtconfig = models.CharField(max_length=750, db_column='CMTCONFIG', blank=True)
glexec = models.CharField(max_length=30, db_column='GLEXEC', blank=True)
priorityoffset = models.CharField(max_length=180, db_column='PRIORITYOFFSET', blank=True)
allowedgroups = models.CharField(max_length=300, db_column='ALLOWEDGROUPS', blank=True)
defaulttoken = models.CharField(max_length=300, db_column='DEFAULTTOKEN', blank=True)
pcache = models.CharField(max_length=300, db_column='PCACHE', blank=True)
validatedreleases = models.CharField(max_length=1500, db_column='VALIDATEDRELEASES', blank=True)
accesscontrol = models.CharField(max_length=60, db_column='ACCESSCONTROL', blank=True)
dn = models.CharField(max_length=300, db_column='DN', blank=True)
email = models.CharField(max_length=180, db_column='EMAIL', blank=True)
allowednode = models.CharField(max_length=240, db_column='ALLOWEDNODE', blank=True)
maxinputsize = models.IntegerField(null=True, db_column='MAXINPUTSIZE', blank=True)
timefloor = models.IntegerField(null=True, db_column='TIMEFLOOR', blank=True)
depthboost = models.IntegerField(null=True, db_column='DEPTHBOOST', blank=True)
idlepilotsupression = models.IntegerField(null=True, db_column='IDLEPILOTSUPRESSION', blank=True)
pilotlimit = models.IntegerField(null=True, db_column='PILOTLIMIT', blank=True)
transferringlimit = models.IntegerField(null=True, db_column='TRANSFERRINGLIMIT', blank=True)
cachedse = models.IntegerField(null=True, db_column='CACHEDSE', blank=True)
corecount = models.IntegerField(null=True, db_column='CORECOUNT', blank=True)
countrygroup = models.CharField(max_length=192, db_column='COUNTRYGROUP', blank=True)
availablecpu = models.CharField(max_length=192, db_column='AVAILABLECPU', blank=True)
availablestorage = models.CharField(max_length=192, db_column='AVAILABLESTORAGE', blank=True)
pledgedcpu = models.CharField(max_length=192, db_column='PLEDGEDCPU', blank=True)
pledgedstorage = models.CharField(max_length=192, db_column='PLEDGEDSTORAGE', blank=True)
statusoverride = models.CharField(max_length=768, db_column='STATUSOVERRIDE', blank=True)
allowdirectaccess = models.CharField(max_length=30, db_column='ALLOWDIRECTACCESS', blank=True)
gocname = models.CharField(max_length=192, db_column='gocname', blank=True)
tier = models.CharField(max_length=45, db_column='tier', blank=True)
multicloud = models.CharField(max_length=192, db_column='MULTICLOUD', blank=True)
lfcregister = models.CharField(max_length=30, db_column='LFCREGISTER', blank=True)
stageinretry = models.IntegerField(null=True, db_column='STAGEINRETRY', blank=True)
stageoutretry = models.IntegerField(null=True, db_column='STAGEOUTRETRY', blank=True)
fairsharepolicy = models.CharField(max_length=1536, db_column='FAIRSHAREPOLICY', blank=True)
allowfax = models.CharField(null=True, max_length=64, db_column='ALLOWFAX', blank=True)
faxredirector = models.CharField(null=True, max_length=256, db_column='FAXREDIRECTOR', blank=True)
maxwdir = models.IntegerField(null=True, db_column='MAXWDIR', blank=True)
celist = models.CharField(max_length=12000, db_column='CELIST', blank=True)
minmemory = models.IntegerField(null=True, db_column='MINMEMORY', blank=True)
maxmemory = models.IntegerField(null=True, db_column='MAXMEMORY', blank=True)
minrss = models.IntegerField(null=True, db_column='MINRSS', blank=True)
maxrss = models.IntegerField(null=True, db_column='MAXRSS', blank=True)
mintime = models.IntegerField(null=True, db_column='MINTIME', blank=True)
allowjem = models.CharField(null=True, max_length=64, db_column='ALLOWJEM', blank=True)
catchall = models.CharField(null=True, max_length=512, db_column='catchall', blank=True)
faxdoor = models.CharField(null=True, max_length=128, db_column='FAXDOOR', blank=True)
wansourcelimit = models.IntegerField(null=True, db_column='WANSOURCELIMIT', blank=True)
wansinklimit = models.IntegerField(null=True, db_column='WANSINKLIMIT', blank=True)
auto_mcu = models.SmallIntegerField(null=True, db_column='AUTO_MCU', blank=True)
objectstore = models.CharField(null=True, max_length=512, db_column='objectstore', blank=True)
allowhttp = models.CharField(null=True, max_length=64, db_column='ALLOWHTTP', blank=True)
httpredirector = models.CharField(null=True, max_length=256, db_column='HTTPREDIRECTOR', blank=True)
multicloud_append = models.CharField(null=True, max_length=64, db_column='MULTICLOUD_APPEND', blank=True)
corepower = models.IntegerField(null=True, db_column='corepower', blank=True)
# Fields added 21.12.17
directaccesslan = models.CharField(null=True, max_length=64, db_column='DIRECT_ACCESS_LAN', blank=True)
directaccesswan = models.CharField(null=True, max_length=64, db_column='DIRECT_ACCESS_WAN', blank=True)
wnconnectivy = models.CharField(null=True, max_length=256, db_column='WNCONNECTIVITY', blank=True)
cloudrshare = models.CharField(null=True, max_length=256, db_column='CLOUDRSHARE', blank=True)
sitershare = models.CharField(null=True, max_length=256, db_column='SITERSHARE', blank=True)
autosetup_post = models.CharField(null=True, max_length=512, db_column='AUTOSETUP_POST', blank=True)
autosetup_pre = models.CharField(null=True, max_length=512, db_column='AUTOSETUP_PRE', blank=True)
use_newmover = models.CharField(null=True, max_length=32, db_column='USE_NEWMOVER', blank=True)
pilotversion = models.CharField(null=True, max_length=32, db_column='PILOTVERSION', blank=True)
objectstores = models.CharField(null=True, max_length=4000, db_column='OBJECTSTORES', blank=True)
container_options = models.CharField(null=True, max_length=1024, db_column='CONTAINER_OPTIONS',blank=True)
container_type = models.CharField(null=True, max_length=256, db_column='CONTAINER_TYPE', blank=True)
jobseed = models.CharField(null=True, max_length=16, db_column='JOBSEED', blank=True)
pilot_manager = models.CharField(null=True, max_length=16, db_column='PILOT_MANAGER', blank=True)
def __str__(self):
return 'Schedconfig:' + str(self.nickname)
def getFields(self):
return ["name", "nickname", "queue", "localqueue", "system", \
"sysconfig", "environ", "gatekeeper", "jobmanager", "ddm", \
"jdladd", "globusadd", "jdl", "jdltxt", "version", "site", \
"region", "gstat", "tags", "cmd", "lastmod", "errinfo", \
"nqueue", "comment_", "appdir", "datadir", "tmpdir", "wntmpdir", \
"dq2url", "special_par", "python_path", "nodes", "status", \
"copytool", "releases", "envsetup", \
"lfcpath", "lfchost", \
"cloud", "siteid", "proxy", "retry", "queuehours", "envsetupin", \
"lfcprodpath", \
"recoverdir", "memory", "maxtime", "space", \
"tspace", "cmtconfig", "glexec", "priorityoffset", \
"allowedgroups", "defaulttoken", "pcache", "validatedreleases", \
"accesscontrol", "dn", "email", "allowednode", "maxinputsize", \
"timefloor", "depthboost", "idlepilotsupression", "pilotlimit", \
"transferringlimit", "cachedse", "corecount", "countrygroup", \
"availablecpu", "availablestorage", "pledgedcpu", \
"pledgedstorage", "statusoverride", "allowdirectaccess", \
"gocname", "tier", "multicloud", "lfcregister", "stageinretry", \
"stageoutretry", "fairsharepolicy", "allowfax", "faxredirector", \
"maxwdir", "celist", "minmemory", "maxmemory", "mintime", \
"allowjem", "catchall", "faxdoor", "wansourcelimit", \
"wansinklimit", "auto_mcu", "objectstore", "allowhttp", \
"httpredirector", "multicloud_append", "direct_access_lan", "direct_access_wan", \
"wnconnectivy", "cloudrshare", "sitershare", "autosetup_post", "autosetup_pre", "use_newmover", "pilotversion", \
"objectstores", "container_options", "container_type", "jobseed", "pilot_manager"
]
def getValuesList(self):
repre = []
for field in self._meta.fields:
repre.append((field.name, field))
return repre
def get_all_fields(self):
"""Returns a list of all field names on the instance."""
fields = []
kys = {}
for f in self._meta.fields:
kys[f.name] = f
kys1 = kys.keys()
kys1 = sorted(kys1)
for k in kys1:
f = kys[k]
fname = f.name
# resolve picklists/choices, with get_xyz_display() function
get_choice = 'get_'+fname+'_display'
if hasattr( self, get_choice):
value = getattr( self, get_choice)()
else:
try:
value = getattr(self, fname)
except ObjectDoesNotExist:
value = None
# only display fields with values and skip some fields entirely
if f.editable and value :
fields.append(
{
'label':f.verbose_name,
'name':f.name,
'value':value,
}
)
return fields
class Meta:
db_table = u'schedconfig'
class SchedconfigJson(models.Model):
pandaqueue = models.CharField(max_length=180, db_column='panda_queue', primary_key=True)
data = models.TextField(db_column='data', blank=True)
lastupdate = models.DateField(db_column='last_update')
class Meta:
db_table = f'"{DB_SCHEMA_PANDA}"."schedconfig_json"'
class Schedinstance(models.Model):
name = models.CharField(max_length=180, db_column='NAME')
nickname = models.CharField(max_length=180, db_column='NICKNAME', primary_key=True)
pandasite = models.CharField(max_length=180, db_column='PANDASITE')
nqueue = models.IntegerField(db_column='NQUEUE')
nqueued = models.IntegerField(db_column='NQUEUED')
nrunning = models.IntegerField(db_column='NRUNNING')
nfinished = models.IntegerField(db_column='NFINISHED')
nfailed = models.IntegerField(db_column='NFAILED')
naborted = models.IntegerField(db_column='NABORTED')
njobs = models.IntegerField(db_column='NJOBS')
tvalid = models.DateTimeField(db_column='TVALID')
lastmod = models.DateTimeField(db_column='LASTMOD')
errinfo = models.CharField(max_length=450, db_column='ERRINFO', blank=True)
ndone = models.IntegerField(db_column='NDONE')
totrunt = models.IntegerField(db_column='TOTRUNT')
comment_field = models.CharField(max_length=1500, db_column='COMMENT_', blank=True) # Field renamed because it was a Python reserved word.
class Meta:
db_table = u'schedinstance'
unique_together = ('nickname', 'pandasite')
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import itertools
import os
import re
from oslo_config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import quota
osapi_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='The maximum number of items returned in a single '
'response from a collection resource'),
cfg.StrOpt('osapi_compute_link_prefix',
help='Base URL that will be presented to users in links '
'to the OpenStack Compute API'),
cfg.StrOpt('osapi_glance_link_prefix',
help='Base URL that will be presented to users in links '
'to glance resources'),
]
CONF = cfg.CONF
CONF.register_opts(osapi_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
# NOTE(cyeoh): A common regexp for acceptable names (user supplied)
# that we want all new extensions to conform to unless there is a very
# good reason not to.
VALID_NAME_REGEX = re.compile(r"^(?! )[\w. _-]+(?<! )$", re.UNICODE)
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
_STATE_MAP = {
vm_states.ACTIVE: {
'default': 'ACTIVE',
task_states.REBOOTING: 'REBOOT',
task_states.REBOOT_PENDING: 'REBOOT',
task_states.REBOOT_STARTED: 'REBOOT',
task_states.REBOOTING_HARD: 'HARD_REBOOT',
task_states.REBOOT_PENDING_HARD: 'HARD_REBOOT',
task_states.REBOOT_STARTED_HARD: 'HARD_REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
task_states.MIGRATING: 'MIGRATING',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
},
vm_states.BUILDING: {
'default': 'BUILD',
},
vm_states.STOPPED: {
'default': 'SHUTOFF',
task_states.RESIZE_PREP: 'RESIZE',
task_states.RESIZE_MIGRATING: 'RESIZE',
task_states.RESIZE_MIGRATED: 'RESIZE',
task_states.RESIZE_FINISH: 'RESIZE',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
},
vm_states.RESIZED: {
'default': 'VERIFY_RESIZE',
# Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE
# state so we comment that out for future reference only.
#task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE',
task_states.RESIZE_REVERTING: 'REVERT_RESIZE',
},
vm_states.PAUSED: {
'default': 'PAUSED',
},
vm_states.SUSPENDED: {
'default': 'SUSPENDED',
},
vm_states.RESCUED: {
'default': 'RESCUE',
},
vm_states.ERROR: {
'default': 'ERROR',
task_states.REBUILDING: 'REBUILD',
task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
task_states.REBUILD_SPAWNING: 'REBUILD',
},
vm_states.DELETED: {
'default': 'DELETED',
},
vm_states.SOFT_DELETED: {
'default': 'SOFT_DELETED',
},
vm_states.SHELVED: {
'default': 'SHELVED',
},
vm_states.SHELVED_OFFLOADED: {
'default': 'SHELVED_OFFLOADED',
},
}
def status_from_state(vm_state, task_state='default'):
"""Given vm_state and task_state, return a status string."""
task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
status = task_map.get(task_state, task_map['default'])
if status == "UNKNOWN":
LOG.error(_LE("status is UNKNOWN from vm_state=%(vm_state)s "
"task_state=%(task_state)s. Bad upgrade or db "
"corrupted?"),
{'vm_state': vm_state, 'task_state': task_state})
return status
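# Illustrative examples of the mapping above (values follow _STATE_MAP):
#   status_from_state(vm_states.ACTIVE, task_states.REBOOTING) -> 'REBOOT'
#   status_from_state(vm_states.STOPPED)                       -> 'SHUTOFF'
#   status_from_state('not-a-state')                           -> 'UNKNOWN' (and an error is logged)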
def task_and_vm_state_from_status(statuses):
"""Map the server's multiple status strings to list of vm states and
list of task states.
"""
vm_states = set()
task_states = set()
lower_statuses = [status.lower() for status in statuses]
for state, task_map in _STATE_MAP.iteritems():
for task_state, mapped_state in task_map.iteritems():
status_string = mapped_state
if status_string.lower() in lower_statuses:
vm_states.add(state)
task_states.add(task_state)
# Add sort to avoid different order on set in Python 3
return sorted(vm_states), sorted(task_states)
def get_sort_params(input_params, default_key='created_at',
default_dir='desc'):
"""Retrieves sort keys/directions parameters.
Processes the parameters to create a list of sort keys and sort directions
that correspond to the 'sort_key' and 'sort_dir' parameter values. These
sorting parameters can be specified multiple times in order to generate
the list of sort keys and directions.
The input parameters are not modified.
:param input_params: webob.multidict of request parameters (from
nova.wsgi.Request.params)
:param default_key: default sort key value, added to the list if no
'sort_key' parameters are supplied
:param default_dir: default sort dir value, added to the list if no
'sort_dir' parameters are supplied
:returns: list of sort keys, list of sort dirs
"""
params = input_params.copy()
sort_keys = []
sort_dirs = []
while 'sort_key' in params:
sort_keys.append(params.pop('sort_key').strip())
while 'sort_dir' in params:
sort_dirs.append(params.pop('sort_dir').strip())
if len(sort_keys) == 0 and default_key:
sort_keys.append(default_key)
if len(sort_dirs) == 0 and default_dir:
sort_dirs.append(default_dir)
return sort_keys, sort_dirs
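# Illustrative usage sketch (a plain dict stands in for the webob multidict):
#   get_sort_params({'sort_key': 'name', 'sort_dir': 'asc'})
#       -> (['name'], ['asc'])
#   get_sort_params({})   # no parameters supplied, falls back to the defaults
#       -> (['created_at'], ['desc'])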
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_int_param(request, 'limit')
if 'page_size' in request.GET:
params['page_size'] = _get_int_param(request, 'page_size')
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
return params
def _get_int_param(request, param):
"""Extract integer param from request or fail."""
try:
int_param = int(request.GET[param])
except ValueError:
msg = _('%s param must be an integer') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
if int_param < 0:
msg = _('%s param must be positive') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
return int_param
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
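# Illustrative behaviour of limited() with items = list(range(10))
# (request values are made up):
#   ?offset=2&limit=3   -> items[2:5] == [2, 3, 4]
#   ?limit=0            -> the full list, capped at max_limit items
#   ?offset=-1          -> HTTPBadRequest('offset param must be positive')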
def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
"""get limited parameter from request."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
marker = params.get('marker')
return limit, marker
def get_id_from_href(href):
"""Return the id or uuid portion of a url.
Given: 'http://www.foo.com/bar/123?q=4'
Returns: '123'
Given: 'http://www.foo.com/bar/abc123?q=4'
Returns: 'abc123'
"""
return urlparse.urlsplit("%s" % href).path.split('/')[-1]
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://www.nova.com/v1.1/123'
Returns: 'http://www.nova.com/123'
Given: 'http://www.nova.com/v1.1'
Returns: 'http://www.nova.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if expression.match(url_parts[1]):
del url_parts[1]
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
LOG.debug('href %s does not contain version' % href)
raise ValueError(_('href %s does not contain version') % href)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
def check_img_metadata_properties_quota(context, metadata):
if not metadata:
return
try:
QUOTAS.limit_check(context, metadata_items=len(metadata))
except exception.OverQuota:
expl = _("Image metadata limit exceeded")
raise webob.exc.HTTPForbidden(explanation=expl)
# check the key length.
if isinstance(metadata, dict):
for key, value in metadata.iteritems():
if len(key) == 0:
expl = _("Image metadata key cannot be blank")
raise webob.exc.HTTPBadRequest(explanation=expl)
if len(key) > 255:
expl = _("Image metadata key too long")
raise webob.exc.HTTPBadRequest(explanation=expl)
else:
expl = _("Invalid image metadata")
raise webob.exc.HTTPBadRequest(explanation=expl)
def dict_to_query_str(params):
# TODO(throughnothing): we should just use urllib.urlencode instead of this
# But currently we don't work with urlencoded url's
param_str = ""
for key, val in params.iteritems():
param_str = param_str + '='.join([str(key), str(val)]) + '&'
return param_str.rstrip('&')
def get_networks_for_instance_from_nw_info(nw_info):
networks = collections.OrderedDict()
for vif in nw_info:
ips = vif.fixed_ips()
floaters = vif.floating_ips()
label = vif['network']['label']
if label not in networks:
networks[label] = {'ips': [], 'floating_ips': []}
networks[label]['ips'].extend(ips)
networks[label]['floating_ips'].extend(floaters)
for ip in itertools.chain(networks[label]['ips'],
networks[label]['floating_ips']):
ip['mac_address'] = vif['address']
return networks
def get_networks_for_instance(context, instance):
"""Returns a prepared nw_info list for passing into the view builders
We end up with a data structure like::
{'public': {'ips': [{'address': '10.0.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '2001::1',
'version': 6,
'mac_address': 'aa:aa:aa:aa:aa:aa'}],
'floating_ips': [{'address': '172.16.0.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'},
{'address': '172.16.2.1',
'version': 4,
'mac_address': 'aa:aa:aa:aa:aa:aa'}]},
...}
"""
nw_info = compute_utils.get_nw_info_for_instance(instance)
return get_networks_for_instance_from_nw_info(nw_info)
def raise_http_conflict_for_instance_invalid_state(exc, action, server_id):
"""Raises a webob.exc.HTTPConflict instance containing a message
appropriate to return via the API based on the original
InstanceInvalidState exception.
"""
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
not_launched = exc.kwargs.get('not_launched')
if attr and state:
msg = _("Cannot '%(action)s' instance %(server_id)s while it is in "
"%(attr)s %(state)s") % {'action': action, 'attr': attr,
'state': state,
'server_id': server_id}
elif not_launched:
msg = _("Cannot '%(action)s' instance %(server_id)s which has never "
"been active") % {'action': action, 'server_id': server_id}
else:
# At least give some meaningful message
msg = _("Instance %(server_id)s is in an invalid state for "
"'%(action)s'") % {'action': action, 'server_id': server_id}
raise webob.exc.HTTPConflict(explanation=msg)
def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.allow_instance_snapshots:
LOG.warning(_LW('Rejecting snapshot request, snapshots currently'
' disabled'))
msg = _("Instance snapshots are not permitted at this time.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return f(*args, **kwargs)
return inner
class ViewBuilder(object):
"""Model API responses as dictionaries."""
def _get_project_id(self, request):
"""Get project id from request url if present or empty string
otherwise
"""
project_id = request.environ["nova.context"].project_id
if project_id in request.url:
return project_id
return ''
def _get_links(self, request, identifier, collection_name):
return [{
"rel": "self",
"href": self._get_href_link(request, identifier, collection_name),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
identifier,
collection_name),
}]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_compute_link_prefix(request.application_url)
url = os.path.join(prefix,
self._get_project_id(request),
collection_name)
return "%s?%s" % (url, dict_to_query_str(params))
def _get_href_link(self, request, identifier, collection_name):
"""Return an href string pointing to this object."""
prefix = self._update_compute_link_prefix(request.application_url)
return os.path.join(prefix,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier, collection_name):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_compute_link_prefix(base_url)
return os.path.join(base_url,
self._get_project_id(request),
collection_name,
str(identifier))
def _get_collection_links(self,
request,
items,
collection_name,
id_key="uuid"):
"""Retrieve 'next' link, if applicable. This is included if:
1) 'limit' param is specified and equals the number of items.
2) 'limit' param is specified but it exceeds CONF.osapi_max_limit,
in this case the number of items is CONF.osapi_max_limit.
3) 'limit' param is NOT specified but the number of items is
CONF.osapi_max_limit.
"""
links = []
max_items = min(
int(request.params.get("limit", CONF.osapi_max_limit)),
CONF.osapi_max_limit)
if max_items and max_items == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
elif 'id' in last_item:
last_item_id = last_item["id"]
else:
last_item_id = last_item["flavorid"]
links.append({
"rel": "next",
"href": self._get_next_link(request,
last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
url_parts[2] = prefix_parts[2] + url_parts[2]
return urlparse.urlunsplit(url_parts).rstrip('/')
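# Illustrative example of the prefix rewrite above (URLs are made up):
#   _update_link_prefix('http://localhost:8774/v2/servers',
#                       'https://api.example.org/compute')
#   -> 'https://api.example.org/compute/v2/servers'
# i.e. scheme and netloc come from the configured prefix and the prefix
# path is prepended to the original path.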
def _update_glance_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_glance_link_prefix)
def _update_compute_link_prefix(self, orig_url):
return self._update_link_prefix(orig_url,
CONF.osapi_compute_link_prefix)
def get_instance(compute_api, context, instance_id, want_objects=False,
expected_attrs=None):
"""Fetch an instance from the compute API, handling error checking."""
try:
return compute_api.get(context, instance_id,
want_objects=want_objects,
expected_attrs=expected_attrs)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
def check_cells_enabled(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if not CONF.cells.enable:
msg = _("Cells is not enabled.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return function(*args, **kwargs)
return inner
|
|
__author__ = 'jpi'
import urllib2
import json
import time
import os
import re
from datetime import datetime
from decimal import Decimal
from urllib2 import HTTPError
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth import get_user_model
from stadtgedaechtnis_backend.utils import replace_multiple, get_nearby_locations
from stadtgedaechtnis_backend.models import Location, Story, Asset, MediaSource, find_user_by_name, Category
STADTGEDAECHTNIS_URL = "http://www.stadtgeschichte-coburg.de/"
def load_json(source):
"""
Loads the JSON file from the source and makes it available in a usable form.
"""
response = urllib2.urlopen(source)
result = response.read()
# add double quotes
dictionary = {
" id:": " \"id\":",
" isDuration:": " \"isDuration\":",
" type:": " \"type\":",
" addressLatLng:": " \"addressLatLng\":",
" typename:": " \"typename\":",
" created:": " \"created\":",
" label:": " \"label\":",
" preview:": " \"preview\":",
" pic:": " \"pic\":",
" pic_text:": " \"pic_text\":",
" timeStart:": " \"timeStart\":",
" author:": " \"author\":",
" www:": " \"www\":",
" details:": " \"details\":",
" timeEnd:": " \"timeEnd\":",
" nr:": " \"nr\":",
" age:": " \"age\":",
" types:": " \"types\":",
"pluralLabel:": "\"pluralLabel\":",
" properties:": " \"properties\":",
"valueType:": "\"valueType\":",
" quellen:": " \"quellen\":",
" richtext:": " \"richtext\":",
" categories:": " \"categories\":",
" ": "",
"\r\n": "",
",\r\n,": ",",
}
# make the JSON valid
result = replace_multiple(result, dictionary)
def fix_richtext(match):
# replace " in richtext with &quot;
richtext = match.group(0)
original_group_1 = match.groups()[1]
group_1 = original_group_1.replace("\"", "&quot;")
def replace_quot(quot_match):
replace_in = quot_match.group(0)
# replace &quot; in html tags with '
return replace_in.replace("&quot;", "'")
group_1 = re.sub(r'<[^/]([^>]*)>', replace_quot, group_1)
return_result = richtext.replace(original_group_1, group_1)
return return_result
# clean richtext and quellen section
result = re.sub(r'(\"richtext\": \"([^\r]*)\"\r)', fix_richtext, result)
result = re.sub(r'(\"quellen\": \"([^\r]*)\", \"richtext\":)', fix_richtext, result)
json_result = json.loads(result)
# select all the items
items = json_result["items"]
return items
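# Illustrative sketch of what the cleanup above has to cope with (the feed
# excerpt is invented, but mirrors keys handled in `dictionary`): the
# upstream service emits pseudo-JSON with unquoted keys, e.g.
#   { label: "Marktplatz", nr: "42", preview: "..." }
# replace_multiple() quotes the keys so json.loads() accepts the document:
#   {"label": "Marktplatz", "nr": "42", "preview": "..."}
# and the resulting "items" list is returned.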
class AddEntryMixIn(object):
"""
Mix-in that can add a story to a given entry
"""
success_entries = []
exist_entries = []
failed_entries = []
def add_story(self, label, story, location_object=None):
# only insert story if story does not exist so far
if not Story.objects.filter(title=label).exists():
entry = Story()
entry.title = label
entry.location = location_object
entry_author = story["author"]
try:
authors = find_user_by_name(entry_author)
entry.author = authors[0]
except get_user_model().DoesNotExist:
author = get_user_model().objects.create_user(entry_author.replace(" ", "_"))
author.first_name = entry_author[:entry_author.rindex(" ")]
author.last_name = entry_author[entry_author.rindex(" "):]
author.save()
entry.author = author
entry.abstract = story["preview"]
if "timeStart" in story:
try:
entry.time_start = time.strftime("%Y-%m-%d", time.strptime(story["timeStart"], "%Y-%m-%d"))
except ValueError:
entry.time_start = time.strftime("%Y-%m-%d",
time.strptime(story["created"], "%d.%m.%Y %H:%M:%S"))
else:
entry.time_start = time.strftime("%Y-%m-%d",
time.strptime(story["created"], "%d.%m.%Y %H:%M:%S"))
if "timeEnd" in story:
entry.time_end = story["timeEnd"]
if "quellen" in story:
entry.sources = story["quellen"]
entry.save()
if "categories" in story:
for category in story["categories"]:
try:
category_object = Category.objects.get(name=category)
except Category.DoesNotExist:
category_object = Category()
category_object.name = category
category_object.save()
entry.categories.add(category_object)
if "richtext" in story:
richtext = story["richtext"]
def replace_img(match):
groupdict = match.groupdict()
src = groupdict["src"]
alt = groupdict["alt"]
if src is not None:
url = STADTGEDAECHTNIS_URL + src
download_image(entry, url, alt)
return ""
richtext = re.sub(r'<img[^>]*src=\'(?P<src>[^>\']*)\'[^>]*alt=\'(?P<alt>[^>\']*)\'[^>]*[/]?>',
replace_img, richtext)
richtext = re.sub(r'<img[^>]*alt=\'(?P<alt>[^>\']*)\'[^>]*src=\'(?P<src>[^>\']*)\'[^>]*[/]?>',
replace_img, richtext)
entry.text = richtext
entry.save()
if "pic" in story and story["pic"] != "":
picture_url = STADTGEDAECHTNIS_URL + story["pic"]
# populate the MediaObject
download_image(entry, picture_url, story["pic_text"] if "pic_text" in story else "")
else:
entry.save()
# Add entry to succeeded entry list
self.success_entries.append(entry)
else:
entry = dict()
entry["title"] = label
entry["location"] = location_object
self.exist_entries.append(entry)
def download_image(entry, picture_url, alt=""):
"""
Downloads an image from the stadtgedaechtnis server and attaches it to the given entry.
:param entry: Entry
:param picture_url: Picture URL
:param alt: Alt for image
:return:
"""
media_object = Asset()
media_object.type = Asset.IMAGE
media_object.created = datetime.now()
media_object.modified = datetime.now()
if alt is None:
alt = ""
media_object.alt = alt
media_object.save()
entry.assets.add(media_object)
# populate the MediaSource
media_source = MediaSource()
media_source.created = datetime.now()
media_source.modified = datetime.now()
media_source.asset = media_object
# get a correct upload path for the image
filename = media_source.get_upload_path("upload.jpg")
try:
download_file = urllib2.urlopen(picture_url)
# create intermediate directories if not present
if not os.path.exists(os.path.dirname(settings.MEDIA_ROOT + filename)):
os.makedirs(os.path.dirname(settings.MEDIA_ROOT + filename))
# open local file
media_file = open(settings.MEDIA_ROOT + filename, "wb")
# download and save file at once (memory!)
media_file.write(download_file.read())
media_file.close()
except HTTPError:
pass
finally:
media_source.file.name = filename
media_source.save()
class JSONAllEntriesImporter(AddEntryMixIn):
"""
Imports all entries from the given source.
"""
def __init__(self, source):
# set source
self.source = source
def do_import(self):
"""
Does the actual import.
:return:
"""
self.success_entries = []
self.failed_entries = []
self.exist_entries = []
items = load_json(self.source)
# filter for all the locations
location_items = filter(lambda entry: "id" in entry, items)
story_items = filter(lambda entry: "label" in entry, items)
# iterate over all the located stories
for location in location_items:
lat, lon = location["addressLatLng"].split(",")
lat, lon = Decimal(lat), Decimal(lon)
label = location["id"]
# find the story
story = filter(lambda entry: entry["label"] == label, story_items)[0]
# check if story already imported
if not Story.objects.filter(title=label).exists():
location_objects = Location.objects.filter(latitude=lat, longitude=lon)
if len(location_objects) > 0:
location_object = location_objects[0]
self.add_story(label, story, location_object)
else:
location["lat"] = str(lat)
location["lon"] = str(lon)
location["url"] = reverse('admin:stadtgedaechtnis_backend_location_add') + \
"?latitude=" + str(lat) + "&longitude=" + str(lon)
location["near_locations"] = list()
location["nr"] = story["nr"]
search_lat = lat - Decimal(0.0007)
search_lon = lon - Decimal(0.0007)
search_max_lat = lat + Decimal(0.0007)
search_max_lon = lon + Decimal(0.0007)
for nearby_location in get_nearby_locations(search_lat, search_lon, search_max_lat, search_max_lon):
near_location = dict()
near_location["id"] = nearby_location.id
near_location["label"] = nearby_location.__unicode__()
location["near_locations"].append(near_location)
self.failed_entries.append(location)
else:
saved_stories = Story.objects.filter(title=label)
saved_story = saved_stories[0]
entry = dict()
entry["title"] = label
entry["location"] = saved_story.location
self.exist_entries.append(entry)
# remove located story from story_items
story_items.remove(story)
# iterate over all the remaining stories without location
for story in story_items:
self.add_story(story["label"], story)
class JSONOneEntryImporter(AddEntryMixIn):
"""
Class that imports one specific entry from the JSON source.
"""
def __init__(self, source, item_id, location_id):
self.source = source
self.item_id = item_id
self.location_id = location_id
def do_import(self):
# load json
items = load_json(self.source)
def find_id(entry):
if "nr" in entry and entry["nr"] == self.item_id:
return True
return False
# find the respective story
story = filter(find_id, items)[0]
try:
# get the location
location_object = Location.objects.get(pk=self.location_id)
# add story
self.add_story(story["label"], story, location_object)
except Location.DoesNotExist:
raise ValueError("No location found for location_id %s" % self.location_id)
def do_silent_json_import(source):
"""
Cronjob to import all the entries silently and save entries, that haven't been
imported to a log list. Also deletes log entries older than 7 days.
:param source:
:return:
"""
importer = JSONAllEntriesImporter(source)
importer.do_import()
from stadtgedaechtnis_backend.models import ImportLogEntry
from datetime import timedelta
log_entry = ImportLogEntry()
log_entry.existed_entries = len(importer.exist_entries)
log_entry.failed_entries = len(importer.failed_entries)
log_entry.imported_entries = len(importer.success_entries)
log_entry.save()
ImportLogEntry.objects.filter(date_time__lte=datetime.now() - timedelta(days=7)).delete()
|
|
#adults dataset
def load_adult():
"""loads adult dataset"""
remove_sp = lambda n: n.replace(' ', '')
last_column = lambda i: i.pop(-1)
binary_= lambda u: 0 if u == '<=50K' else 1
defs_ = [
{'age': None},
{'workclass': ['Private', '?', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov', 'Without-pay', 'Never-worked']},
{'fnlwgt': None},
{'education': ['Bachelors', '?', ' Some-college', ' 11th', ' HS-grad', ' Prof-school', ' Assoc-acdm', ' Assoc-voc', ' 9th', ' 7th-8th', ' 12th', ' Masters', ' 1st-4th', ' 10th', ' Doctorate', ' 5th-6th', ' Preschool']},
{'education-num': None},
{'marital-status': ['Married-civ-spouse', '?', 'Divorced', 'Never-married', 'Separated', 'Widowed', ' Married-spouse-absent', ' Married-AF-spouse']},
{'occupation': ['Tech-support', ' Craft-repair', '?', ' Other-service', ' Sales', ' Exec-managerial', ' Prof-specialty', ' Handlers-cleaners', ' Machine-op-inspct', 'Adm-clerical', ' Farming-fishing', ' Transport-moving', ' Priv-house-serv', ' Protective-serv', ' Armed-Forces']},
{'relationship': ['Wife', ' Own-child', ' Husband', '?', ' Not-in-family', ' Other-relative', ' Unmarried']},
{'race': ['White', ' Asian-Pac-Islander', '?', ' Amer-Indian-Eskimo', ' Other', ' Black']},
{'sex': ['Female', ' Male', '?']},
{'capital-gain': None},
{'capital-loss': None},
{'hours-per-week': None},
{'native-country': ['United-States', '?', ' Cambodia', ' England', ' Puerto-Rico', ' Canada', ' Germany', ' Outlying-US(Guam-USVI-etc)', ' India', ' Japan', ' Greece', ' South', ' China', ' Cuba', ' Iran', ' Honduras', ' Philippines', ' Italy', ' Poland', ' Jamaica', ' Vietnam', ' Mexico', ' Portugal', ' Ireland', ' France', ' Dominican-Republic', ' Laos', ' Ecuador', ' Taiwan', ' Haiti', ' Columbia', ' Hungary', ' Guatemala', ' Nicaragua', ' Scotland', ' Thailand', ' Yugoslavia', ' El-Salvador', ' Trinadad&Tobago', ' Peru', ' Hong', ' Holand-Netherlands']}
]
v = -1
for i,a in enumerate(defs_):
current_col = a
v += 1
key_ = current_col.keys()[0]
if current_col[key_]:
defs_[i][key_] = dict([(b.strip(' '), i_) for b, i_ in zip(current_col[key_], range(0, len(current_col[key_])))])
defs_[i][v] = defs_[i].pop(key_)
y = ''
f = open("datasets_/adults.txt", 'rb')
for a in f:
y += a
y = y.split('\n')
y.pop(-1)
labels_ = []
for n, j in enumerate(y):
y[n] = y[n].split(',')
current_ = map(remove_sp, y[n])
indicator_ = current_.pop(-1)
labels_.append(indicator_)
for i, a in enumerate(current_):
column_ = defs_[i]
if column_.values()[0] == None:
current_[i] = float(current_[i])
elif column_.values()[0].has_key(current_[i]):
current_[i] = column_.values()[0][current_[i]]
y[n] = current_
return y, map(binary_, labels_)
#wines dataset
def load_wines():
y = ''
f = open('datasets_/wines.txt', 'rb')
for a in f:
y += a
y = y.split('\n')
labels_ = []
for i, a in enumerate(y):
y[i] = y[i].split(',')
indicator_ = y[i].pop(0)
labels_.append(indicator_)
y[i] = map(float, y[i])
return y, map(float, labels_)
#car dataset
#http://archive.ics.uci.edu/ml/machine-learning-databases/car/
def load_cars():
def replace_stuff(n):
if n in ['more','5more']:
return 5
else:
return n
defs_ = [
{'buying': {'vhigh': 4, 'high': 3, 'med': 2, 'low': 1}},
{'maint': {'vhigh': 4, 'high': 3, 'med': 2, 'low': 1}},
{'doors': None},
{'persons': None},
{'lug_boot': {'small': 1, 'med': 2, 'big': 3}},
{'safety': {'low': 1, 'med': 2, 'high': 3}},
]
v = -1
for i, a in enumerate(defs_):
v += 1
key_ = defs_[i].keys()[0]
defs_[i][v] = defs_[i].pop(key_)
y = ''
f = open('datasets_/cars.txt', 'rb')
for a in f:
y += a
y = y.split('\n')
labels_ = []
for i, a in enumerate(y):
y[i] = y[i].split(',')
indicator_ = y[i].pop(-1)
labels_.append(indicator_)
current_ = map(replace_stuff, y[i])
for j, b in enumerate(current_):
col_ = defs_[j]
item_ = current_[j]
if col_.values()[0] == None:
current_[j]= float(current_[j])
else:
if col_.values()[0].has_key(current_[j]):
current_[j] = col_.values()[0][current_[j]]
y[i] = current_
return y, labels_
#yeasts dataset (all continuous)
#http://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data
def load_yeast():
defs_ = {'sequence_name': str,
'mcg': float,
'gvh': float,
'alm': float,
'mit': float,
'erl': float,
'pox': float,
'vac': float,
'nuc': float,
'class': str
}
f = open('datasets_/yeast.txt', 'rb')
y = ''
for a in f:
y += a
y = y.split('\n')
labels_ = []
for i, a in enumerate(y):
y[i]= y[i].split(' ')
indicator_ = y[i].pop(-1)
labels_.append(indicator_)
remove_first = y[i].pop(0)
y[i] = map(float, filter(lambda n: len(n) > 0, y[i]))
return y, labels_
#wine quality dataset (all continuous)
#http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality.names
def load_wine_quality():
defs_ = {
'fixed acidity': float,
'volatile acidity': float,
'citric acid': float,
'residual sugar': float,
'chlorides': float,
'free sulfur dioxide': float,
'total sulfur dioxide': float,
'density': float,
'pH': float,
'sulphates': float,
'alcohol': float,
'quality': int
}
f = open('datasets_/wine_quality.txt', 'rb')
y = ''
for a in f:
y += a
y = y.split('\n')
y.pop(-1)
labels_ = []
for i, a in enumerate(y):
y[i] = filter(lambda n : len(n) > 0, y[i].split('\t'))
indicator_ = y[i].pop(-1)
labels_.append(int(indicator_))
y[i] = map(float, y[i])
return y, labels_
#seeds dataset (all continuous)
#https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt
def load_seeds():
defs_ = {
'area': float,
'perimeter': float,
'compactness': float,
'length of kernel': float,
'width of kernel': float,
'asymmetry coefficient': float,
'length of kernel groove': float,
'seed type': int
}
f = open('datasets_/seeds.txt', 'rb')
y = ''
for a in f:
y += a
y = y.split('\n')
labels_ = []
for i, a in enumerate(y):
y[i] = filter(lambda n: len(n) > 0, y[i].split('\t'))
indicator_ = y[i].pop(-1)
labels_.append(int(indicator_))
y[i] = map(float, y[i])
return y, labels_
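# Minimal usage sketch (Python 2, like the loaders above; assumes the
# datasets_/ text files referenced in the loaders are present next to this
# module): every loader returns a pair of parallel lists -- feature rows and
# labels.
if __name__ == '__main__':
    wine_rows, wine_labels = load_wines()
    print('wines: %d samples, %d features each' % (len(wine_rows), len(wine_rows[0])))
    adult_rows, adult_labels = load_adult()
    print('adults: %d samples, %.3f positive rate' % (
        len(adult_rows), float(sum(adult_labels)) / len(adult_labels)))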
|
|
"""Tests for async util methods from Python source."""
import asyncio
import sys
from unittest.mock import MagicMock, patch
from unittest import TestCase
import pytest
from homeassistant.util import async_ as hasync
@patch("asyncio.coroutines.iscoroutine")
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_run_coroutine_threadsafe_from_inside_event_loop(
mock_ident, _, mock_iscoroutine
):
"""Testing calling run_coroutine_threadsafe from inside an event loop."""
coro = MagicMock()
loop = MagicMock()
loop._thread_ident = None
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
hasync.run_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 5
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
with pytest.raises(RuntimeError):
hasync.run_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
mock_iscoroutine.return_value = False
with pytest.raises(TypeError):
hasync.run_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
hasync.run_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 2
@patch("asyncio.coroutines.iscoroutine")
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_fire_coroutine_threadsafe_from_inside_event_loop(
mock_ident, _, mock_iscoroutine
):
"""Testing calling fire_coroutine_threadsafe from inside an event loop."""
coro = MagicMock()
loop = MagicMock()
loop._thread_ident = None
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 5
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
with pytest.raises(RuntimeError):
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
mock_iscoroutine.return_value = False
with pytest.raises(TypeError):
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
mock_iscoroutine.return_value = True
hasync.fire_coroutine_threadsafe(coro, loop)
assert len(loop.call_soon_threadsafe.mock_calls) == 2
@patch("concurrent.futures.Future")
@patch("threading.get_ident")
def test_run_callback_threadsafe_from_inside_event_loop(mock_ident, _):
"""Testing calling run_callback_threadsafe from inside an event loop."""
callback = MagicMock()
loop = MagicMock()
loop._thread_ident = None
mock_ident.return_value = 5
hasync.run_callback_threadsafe(loop, callback)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 5
mock_ident.return_value = 5
with pytest.raises(RuntimeError):
hasync.run_callback_threadsafe(loop, callback)
assert len(loop.call_soon_threadsafe.mock_calls) == 1
loop._thread_ident = 1
mock_ident.return_value = 5
hasync.run_callback_threadsafe(loop, callback)
assert len(loop.call_soon_threadsafe.mock_calls) == 2
class RunThreadsafeTests(TestCase):
"""Test case for hasync.run_coroutine_threadsafe."""
def setUp(self):
"""Test setup method."""
self.loop = asyncio.new_event_loop()
def tearDown(self):
"""Test teardown method."""
executor = self.loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
self.loop.close()
@staticmethod
def run_briefly(loop):
"""Momentarily run a coroutine on the given loop."""
@asyncio.coroutine
def once():
pass
gen = once()
t = loop.create_task(gen)
try:
loop.run_until_complete(t)
finally:
gen.close()
def add_callback(self, a, b, fail, invalid):
"""Return a + b."""
if fail:
raise RuntimeError("Fail!")
if invalid:
raise ValueError("Invalid!")
return a + b
@asyncio.coroutine
def add_coroutine(self, a, b, fail, invalid, cancel):
"""Wait 0.05 second and return a + b."""
yield from asyncio.sleep(0.05, loop=self.loop)
if cancel:
if sys.version_info[:2] >= (3, 7):
current_task = asyncio.current_task
else:
current_task = asyncio.tasks.Task.current_task
current_task(self.loop).cancel()
yield
return self.add_callback(a, b, fail, invalid)
def target_callback(self, fail=False, invalid=False):
"""Run add callback in the event loop."""
future = hasync.run_callback_threadsafe(
self.loop, self.add_callback, 1, 2, fail, invalid
)
try:
return future.result()
finally:
future.done() or future.cancel()
def target_coroutine(
self, fail=False, invalid=False, cancel=False, timeout=None, advance_coro=False
):
"""Run add coroutine in the event loop."""
coro = self.add_coroutine(1, 2, fail, invalid, cancel)
future = hasync.run_coroutine_threadsafe(coro, self.loop)
if advance_coro:
# this is for test_run_coroutine_threadsafe_task_factory_exception;
# otherwise it spills errors and breaks **other** unittests, since
# 'target_coroutine' is interacting with threads.
# With this call, `coro` will be advanced, so that
# CoroWrapper.__del__ won't do anything when asyncio tests run
# in debug mode.
self.loop.call_soon_threadsafe(coro.send, None)
try:
return future.result(timeout)
finally:
future.done() or future.cancel()
def test_run_coroutine_threadsafe(self):
"""Test coroutine submission from a thread to an event loop."""
future = self.loop.run_in_executor(None, self.target_coroutine)
result = self.loop.run_until_complete(future)
self.assertEqual(result, 3)
def test_run_coroutine_threadsafe_with_exception(self):
"""Test coroutine submission from thread to event loop on exception."""
future = self.loop.run_in_executor(None, self.target_coroutine, True)
with self.assertRaises(RuntimeError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Fail!", exc_context.exception.args)
def test_run_coroutine_threadsafe_with_invalid(self):
"""Test coroutine submission from thread to event loop on invalid."""
callback = lambda: self.target_coroutine(invalid=True) # noqa
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(ValueError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Invalid!", exc_context.exception.args)
def test_run_coroutine_threadsafe_with_timeout(self):
"""Test coroutine submission from thread to event loop on timeout."""
callback = lambda: self.target_coroutine(timeout=0) # noqa
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(future)
self.run_briefly(self.loop)
# Check that there's no pending task (add has been cancelled)
if sys.version_info[:2] >= (3, 7):
all_tasks = asyncio.all_tasks
else:
all_tasks = asyncio.Task.all_tasks
for task in all_tasks(self.loop):
self.assertTrue(task.done())
def test_run_coroutine_threadsafe_task_cancelled(self):
"""Test coroutine submission from tread to event loop on cancel."""
callback = lambda: self.target_coroutine(cancel=True) # noqa
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(future)
def test_run_callback_threadsafe(self):
"""Test callback submission from a thread to an event loop."""
future = self.loop.run_in_executor(None, self.target_callback)
result = self.loop.run_until_complete(future)
self.assertEqual(result, 3)
def test_run_callback_threadsafe_with_exception(self):
"""Test callback submission from thread to event loop on exception."""
future = self.loop.run_in_executor(None, self.target_callback, True)
with self.assertRaises(RuntimeError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Fail!", exc_context.exception.args)
def test_run_callback_threadsafe_with_invalid(self):
"""Test callback submission from thread to event loop on invalid."""
callback = lambda: self.target_callback(invalid=True) # noqa
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(ValueError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Invalid!", exc_context.exception.args)
|
|
"""Component to manage a shoppling list."""
import asyncio
import json
import logging
import os
import uuid
import voluptuous as vol
from homeassistant.const import HTTP_NOT_FOUND, HTTP_BAD_REQUEST
from homeassistant.core import callback
from homeassistant.components import http
from homeassistant.helpers import intent
import homeassistant.helpers.config_validation as cv
DOMAIN = 'shopping_list'
DEPENDENCIES = ['http']
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({DOMAIN: {}}, extra=vol.ALLOW_EXTRA)
EVENT = 'shopping_list_updated'
INTENT_ADD_ITEM = 'HassShoppingListAddItem'
INTENT_LAST_ITEMS = 'HassShoppingListLastItems'
ITEM_UPDATE_SCHEMA = vol.Schema({
'complete': bool,
'name': str,
})
PERSISTENCE = '.shopping_list.json'
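# A minimal sketch of enabling this component (assumed configuration.yaml
# usage, derived from CONFIG_SCHEMA above; not taken from this file):
#
#     shopping_list:
#
# Items are persisted to <config dir>/.shopping_list.json (see PERSISTENCE).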
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the shopping list."""
data = hass.data[DOMAIN] = ShoppingData(hass)
yield from data.async_load()
intent.async_register(hass, AddItemIntent())
intent.async_register(hass, ListTopItemsIntent())
hass.http.register_view(ShoppingListView)
hass.http.register_view(UpdateShoppingListItemView)
hass.http.register_view(ClearCompletedItemsView)
hass.components.conversation.async_register(INTENT_ADD_ITEM, [
'Add {item} to my shopping list',
])
hass.components.conversation.async_register(INTENT_LAST_ITEMS, [
'What is on my shopping list'
])
yield from hass.components.frontend.async_register_built_in_panel(
'shopping-list', 'shopping_list', 'mdi:cart')
return True
class ShoppingData:
"""Class to hold shopping list data."""
def __init__(self, hass):
"""Initialize the shopping list."""
self.hass = hass
self.items = []
@callback
def async_add(self, name):
"""Add a shopping list item."""
self.items.append({
'name': name,
'id': uuid.uuid4().hex,
'complete': False
})
self.hass.async_add_job(self.save)
@callback
def async_update(self, item_id, info):
"""Update a shopping list item."""
item = next((itm for itm in self.items if itm['id'] == item_id), None)
if item is None:
raise KeyError
info = ITEM_UPDATE_SCHEMA(info)
item.update(info)
self.hass.async_add_job(self.save)
return item
@callback
def async_clear_completed(self):
"""Clear completed items."""
self.items = [itm for itm in self.items if not itm['complete']]
self.hass.async_add_job(self.save)
@asyncio.coroutine
def async_load(self):
"""Load items."""
def load():
"""Load the items synchronously."""
path = self.hass.config.path(PERSISTENCE)
if not os.path.isfile(path):
return []
with open(path) as file:
return json.loads(file.read())
items = yield from self.hass.async_add_job(load)
self.items = items
def save(self):
"""Save the items."""
with open(self.hass.config.path(PERSISTENCE), 'wt') as file:
file.write(json.dumps(self.items, sort_keys=True, indent=4))
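# The persisted file written by ShoppingData.save() looks roughly like this
# (the item values below are illustrative assumptions):
#
#     [
#         {
#             "complete": false,
#             "id": "0123456789abcdef0123456789abcdef",
#             "name": "milk"
#         }
#     ]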
class AddItemIntent(intent.IntentHandler):
"""Handle AddItem intents."""
intent_type = INTENT_ADD_ITEM
slot_schema = {
'item': cv.string
}
@asyncio.coroutine
def async_handle(self, intent_obj):
"""Handle the intent."""
slots = self.async_validate_slots(intent_obj.slots)
item = slots['item']['value']
intent_obj.hass.data[DOMAIN].async_add(item)
response = intent_obj.create_response()
response.async_set_speech(
"I've added {} to your shopping list".format(item))
intent_obj.hass.bus.async_fire(EVENT)
return response
class ListTopItemsIntent(intent.IntentHandler):
"""Handle AddItem intents."""
intent_type = INTENT_LAST_ITEMS
slot_schema = {
'item': cv.string
}
@asyncio.coroutine
def async_handle(self, intent_obj):
"""Handle the intent."""
items = intent_obj.hass.data[DOMAIN].items[-5:]
response = intent_obj.create_response()
if not items:
response.async_set_speech(
"There are no items on your shopping list")
else:
response.async_set_speech(
"These are the top {} items on your shopping list: {}".format(
min(len(items), 5),
', '.join(itm['name'] for itm in reversed(items))))
return response
class ShoppingListView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list'
name = "api:shopping_list"
@callback
def get(self, request):
"""Retrieve if API is running."""
return self.json(request.app['hass'].data[DOMAIN].items)
class UpdateShoppingListItemView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list/item/{item_id}'
name = "api:shopping_list:item:id"
    @asyncio.coroutine
    def post(self, request, item_id):
        """Update a shopping list item."""
data = yield from request.json()
try:
item = request.app['hass'].data[DOMAIN].async_update(item_id, data)
request.app['hass'].bus.async_fire(EVENT)
return self.json(item)
except KeyError:
return self.json_message('Item not found', HTTP_NOT_FOUND)
except vol.Invalid:
return self.json_message('Item not found', HTTP_BAD_REQUEST)
class ClearCompletedItemsView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/shopping_list/clear_completed'
name = "api:shopping_list:clear_completed"
@callback
def post(self, request):
"""Retrieve if API is running."""
hass = request.app['hass']
hass.data[DOMAIN].async_clear_completed()
hass.bus.async_fire(EVENT)
return self.json_message('Cleared completed items.')
|
|
import logging
import pickle
from collections import MutableSet
from datetime import datetime
from sqlalchemy import Unicode, select, Column, Integer, DateTime, ForeignKey, or_, func
from sqlalchemy.orm import relationship
from sqlalchemy.sql.elements import and_
from flexget import db_schema
from flexget.db_schema import versioned_base
from flexget.entry import Entry
from flexget.manager import Session
from flexget.utils import json
from flexget.utils.database import entry_synonym, with_session
from flexget.utils.sqlalchemy_utils import table_schema, table_add_column
log = logging.getLogger('entry_list.db')
Base = versioned_base('entry_list', 1)
@db_schema.upgrade('entry_list')
def upgrade(ver, session):
    if ver is None:
ver = 0
if ver == 0:
table = table_schema('entry_list_entries', session)
table_add_column(table, 'json', Unicode, session)
# Make sure we get the new schema with the added column
table = table_schema('entry_list_entries', session)
for row in session.execute(select([table.c.id, table.c.entry])):
try:
p = pickle.loads(row['entry'])
session.execute(
table.update()
.where(table.c.id == row['id'])
.values(json=json.dumps(p, encode_datetime=True))
)
except KeyError as e:
                log.error('Error upgrading entry_list pickle object: %s' % str(e))
ver = 1
return ver
class EntryListList(Base):
__tablename__ = 'entry_list_lists'
id = Column(Integer, primary_key=True)
name = Column(Unicode, unique=True)
added = Column(DateTime, default=datetime.now)
entries = relationship(
'EntryListEntry', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic'
)
def to_dict(self):
return {'id': self.id, 'name': self.name, 'added_on': self.added}
class EntryListEntry(Base):
__tablename__ = 'entry_list_entries'
id = Column(Integer, primary_key=True)
list_id = Column(Integer, ForeignKey(EntryListList.id), nullable=False)
added = Column(DateTime, default=datetime.now)
title = Column(Unicode)
original_url = Column(Unicode)
_json = Column('json', Unicode)
entry = entry_synonym('_json')
def __init__(self, entry, entry_list_id):
self.title = entry['title']
self.original_url = entry.get('original_url') or entry['url']
self.entry = entry
self.list_id = entry_list_id
def __repr__(self):
return '<EntryListEntry,title=%s,original_url=%s>' % (self.title, self.original_url)
def to_dict(self):
return {
'id': self.id,
'list_id': self.list_id,
'added_on': self.added,
'title': self.title,
'original_url': self.original_url,
'entry': dict(self.entry),
}
class DBEntrySet(MutableSet):
def _db_list(self, session):
return session.query(EntryListList).filter(EntryListList.name == self.config).first()
def __init__(self, config):
self.config = config
with Session() as session:
if not self._db_list(session):
session.add(EntryListList(name=self.config))
def _entry_query(self, session, entry):
db_entry = (
session.query(EntryListEntry)
.filter(
and_(
EntryListEntry.list_id == self._db_list(session).id,
or_(
EntryListEntry.title == entry['title'],
and_(
EntryListEntry.original_url,
EntryListEntry.original_url == entry['original_url'],
),
),
)
)
.first()
)
return db_entry
def __iter__(self):
with Session() as session:
for e in self._db_list(session).entries.order_by(EntryListEntry.added.desc()).all():
log.debug('returning %s', e.entry)
yield e.entry
def __contains__(self, entry):
with Session() as session:
return self._entry_query(session, entry) is not None
def __len__(self):
with Session() as session:
return self._db_list(session).entries.count()
def discard(self, entry):
with Session() as session:
db_entry = self._entry_query(session=session, entry=entry)
if db_entry:
log.debug('deleting entry %s', db_entry)
session.delete(db_entry)
def add(self, entry):
# Evaluate all lazy fields so that no db access occurs during our db session
entry.values()
with Session() as session:
stored_entry = self._entry_query(session, entry)
if stored_entry:
# Refresh all the fields if we already have this entry
log.debug('refreshing entry %s', entry)
stored_entry.entry = entry
else:
log.debug('adding entry %s to list %s', entry, self._db_list(session).name)
stored_entry = EntryListEntry(entry=entry, entry_list_id=self._db_list(session).id)
session.add(stored_entry)
def __ior__(self, other):
# Optimization to only open one session when adding multiple items
# Make sure lazy lookups are done before opening our session to prevent db locks
for value in other:
value.values()
for value in other:
self.add(value)
return self
@property
def immutable(self):
return False
def _from_iterable(self, it):
# TODO: is this the right answer? the returned object won't have our custom __contains__ logic
return set(it)
@property
def online(self):
""" Set the online status of the plugin, online plugin should be treated differently in certain situations,
like test mode"""
return False
def get(self, entry):
with Session() as session:
match = self._entry_query(session=session, entry=entry)
return Entry(match.entry) if match else None
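# Hedged usage sketch (the list name and entry fields are illustrative):
# DBEntrySet exposes a database-backed list through the MutableSet interface,
# e.g.
#
#     entries = DBEntrySet('my_list')
#     entries.add(Entry(title='Some.Title.2015', url='http://example.com/1'))
#     titles = [e['title'] for e in entries]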
@with_session
def get_entry_lists(name=None, session=None):
log.debug('retrieving entry lists')
query = session.query(EntryListList)
if name:
log.debug('searching for entry lists with name %s', name)
query = query.filter(EntryListList.name.contains(name))
return query.all()
@with_session
def get_list_by_exact_name(name, session=None):
log.debug('returning entry list with name %s', name)
return (
session.query(EntryListList).filter(func.lower(EntryListList.name) == name.lower()).one()
)
@with_session
def get_list_by_id(list_id, session=None):
log.debug('fetching entry list with id %d', list_id)
return session.query(EntryListList).filter(EntryListList.id == list_id).one()
@with_session
def delete_list_by_id(list_id, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('deleting entry list with id %d', list_id)
session.delete(entry_list)
@with_session
def get_entries_by_list_id(
list_id, start=None, stop=None, order_by='title', descending=False, session=None
):
log.debug('querying entries from entry list with id %d', list_id)
query = session.query(EntryListEntry).filter(EntryListEntry.list_id == list_id)
if descending:
query = query.order_by(getattr(EntryListEntry, order_by).desc())
else:
query = query.order_by(getattr(EntryListEntry, order_by))
return query.slice(start, stop).all()
@with_session
def get_entry_by_title(list_id, title, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
return (
session.query(EntryListEntry)
.filter(and_(EntryListEntry.title == title, EntryListEntry.list_id == list_id))
.first()
)
@with_session
def get_entry_by_id(list_id, entry_id, session=None):
log.debug('fetching entry with id %d from list id %d', entry_id, list_id)
return (
session.query(EntryListEntry)
.filter(and_(EntryListEntry.id == entry_id, EntryListEntry.list_id == list_id))
.one()
)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import netaddr
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base
from tempest.common import custom_matchers
from tempest import config
from tempest import test
CONF = config.CONF
class NetworksTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create a network for a tenant
list tenant's networks
show a tenant network details
create a subnet for a tenant
list tenant's subnets
show a tenant subnet details
network update
subnet update
delete a network also deletes its subnets
list external networks
All subnet tests are run once with ipv4 and once with ipv6.
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of cidr's from which smaller blocks
can be allocated for tenant ipv4 subnets
tenant_network_v6_cidr is the equivalent for ipv6 subnets
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant_network_cidr
tenant_network_v6_mask_bits is the equivalent for ipv6 subnets
"""
@classmethod
def resource_setup(cls):
super(NetworksTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
cls._ip_version)
cls.cidr = cls.subnet['cidr']
        cls._subnet_data = {
            6: {'gateway': str(cls._get_gateway_from_tempest_conf(6)),
                'allocation_pools': cls._get_allocation_pools_from_gateway(6),
                'dns_nameservers': ['2001:4860:4860::8844',
                                    '2001:4860:4860::8888'],
                'host_routes': [{'destination': '2001::/64',
                                 'nexthop': '2003::1'}],
                'new_host_routes': [{'destination': '2001::/64',
                                     'nexthop': '2005::1'}],
                'new_dns_nameservers': ['2001:4860:4860::7744',
                                        '2001:4860:4860::7888']},
            4: {'gateway': str(cls._get_gateway_from_tempest_conf(4)),
                'allocation_pools': cls._get_allocation_pools_from_gateway(4),
                'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
                'host_routes': [{'destination': '10.20.0.0/32',
                                 'nexthop': '10.100.1.1'}],
                'new_host_routes': [{'destination': '10.20.0.0/32',
                                     'nexthop': '10.100.1.2'}],
                'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
@classmethod
def _create_subnet_with_last_subnet_block(cls, network, ip_version):
"""Derive last subnet CIDR block from tenant CIDR and
create the subnet with that derived CIDR
"""
if ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
subnet_cidr = list(cidr.subnet(mask_bits))[-1]
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
return cls.create_subnet(network, gateway=gateway_ip,
cidr=subnet_cidr, mask_bits=mask_bits)
@classmethod
def _get_gateway_from_tempest_conf(cls, ip_version):
"""Return first subnet gateway for configured CIDR """
if ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
if mask_bits >= cidr.prefixlen:
return netaddr.IPAddress(cidr) + 1
else:
for subnet in cidr.subnet(mask_bits):
return netaddr.IPAddress(subnet) + 1
@classmethod
def _get_allocation_pools_from_gateway(cls, ip_version):
"""Return allocation range for subnet of given gateway"""
gateway = cls._get_gateway_from_tempest_conf(ip_version)
return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
def subnet_dict(self, include_keys):
"""Return a subnet dict which has include_keys and their corresponding
value from self._subnet_data
"""
return dict((key, self._subnet_data[self._ip_version][key])
for key in include_keys)
def _compare_resource_attrs(self, actual, expected):
exclude_keys = set(actual).symmetric_difference(expected)
self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
expected, exclude_keys))
def _delete_network(self, network):
        # Deleting a network also deletes its subnets, if any exist
self.client.delete_network(network['id'])
if network in self.networks:
self.networks.remove(network)
for subnet in self.subnets:
if subnet['network_id'] == network['id']:
self.subnets.remove(subnet)
def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
**kwargs):
network = self.create_network()
net_id = network['id']
gateway = kwargs.pop('gateway', None)
subnet = self.create_subnet(network, gateway, cidr, mask_bits,
**kwargs)
compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
mask_bits=mask_bits, **kwargs)
compare_args = dict((k, v) for k, v in compare_args_full.iteritems()
if v is not None)
if 'dns_nameservers' in set(subnet).intersection(compare_args):
self.assertEqual(sorted(compare_args['dns_nameservers']),
sorted(subnet['dns_nameservers']))
del subnet['dns_nameservers'], compare_args['dns_nameservers']
self._compare_resource_attrs(subnet, compare_args)
self.client.delete_network(net_id)
self.networks.pop()
self.subnets.pop()
@test.attr(type='smoke')
@test.idempotent_id('0e269138-0da6-4efc-a46d-578161e7b221')
def test_create_update_delete_network_subnet(self):
# Create a network
name = data_utils.rand_name('network-')
network = self.create_network(network_name=name)
self.addCleanup(self._delete_network, network)
net_id = network['id']
self.assertEqual('ACTIVE', network['status'])
# Verify network update
new_name = "New_network"
body = self.client.update_network(net_id, name=new_name)
updated_net = body['network']
self.assertEqual(updated_net['name'], new_name)
# Find a cidr that is not in use yet and create a subnet with it
subnet = self.create_subnet(network)
subnet_id = subnet['id']
# Verify subnet update
new_name = "New_subnet"
body = self.client.update_subnet(subnet_id, name=new_name)
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_name)
@test.attr(type='smoke')
@test.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
def test_show_network(self):
# Verify the details of a network
body = self.client.show_network(self.network['id'])
network = body['network']
for key in ['id', 'name']:
self.assertEqual(network[key], self.network[key])
@test.attr(type='smoke')
@test.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e')
def test_show_network_fields(self):
# Verify specific fields of a network
fields = ['id', 'name']
body = self.client.show_network(self.network['id'],
fields=fields)
network = body['network']
self.assertEqual(sorted(network.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(network[field_name], self.network[field_name])
@test.attr(type='smoke')
@test.idempotent_id('f7ffdeda-e200-4a7a-bcbe-05716e86bf43')
def test_list_networks(self):
# Verify the network exists in the list of all networks
body = self.client.list_networks()
networks = [network['id'] for network in body['networks']
if network['id'] == self.network['id']]
self.assertNotEmpty(networks, "Created network not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080')
def test_list_networks_fields(self):
# Verify specific fields of the networks
fields = ['id', 'name']
body = self.client.list_networks(fields=fields)
networks = body['networks']
self.assertNotEmpty(networks, "Network list returned is empty")
for network in networks:
self.assertEqual(sorted(network.keys()), sorted(fields))
@test.attr(type='smoke')
@test.idempotent_id('bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc')
def test_show_subnet(self):
# Verify the details of a subnet
body = self.client.show_subnet(self.subnet['id'])
subnet = body['subnet']
self.assertNotEmpty(subnet, "Subnet returned has no fields")
for key in ['id', 'cidr']:
self.assertIn(key, subnet)
self.assertEqual(subnet[key], self.subnet[key])
@test.attr(type='smoke')
@test.idempotent_id('270fff0b-8bfc-411f-a184-1e8fd35286f0')
def test_show_subnet_fields(self):
# Verify specific fields of a subnet
fields = ['id', 'network_id']
body = self.client.show_subnet(self.subnet['id'],
fields=fields)
subnet = body['subnet']
self.assertEqual(sorted(subnet.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(subnet[field_name], self.subnet[field_name])
@test.attr(type='smoke')
@test.idempotent_id('db68ba48-f4ea-49e9-81d1-e367f6d0b20a')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
body = self.client.list_subnets()
subnets = [subnet['id'] for subnet in body['subnets']
if subnet['id'] == self.subnet['id']]
self.assertNotEmpty(subnets, "Created subnet not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('842589e3-9663-46b0-85e4-7f01273b0412')
def test_list_subnets_fields(self):
# Verify specific fields of subnets
fields = ['id', 'network_id']
body = self.client.list_subnets(fields=fields)
subnets = body['subnets']
self.assertNotEmpty(subnets, "Subnet list returned is empty")
for subnet in subnets:
self.assertEqual(sorted(subnet.keys()), sorted(fields))
def _try_delete_network(self, net_id):
# delete network, if it exists
try:
self.client.delete_network(net_id)
# if network is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
@test.attr(type='smoke')
@test.idempotent_id('f04f61a9-b7f3-4194-90b2-9bcf660d1bfe')
def test_delete_network_with_subnet(self):
# Creates a network
name = data_utils.rand_name('network-')
body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
self.addCleanup(self._try_delete_network, net_id)
# Find a cidr that is not in use yet and create a subnet with it
subnet = self.create_subnet(network)
subnet_id = subnet['id']
# Delete network while the subnet still exists
body = self.client.delete_network(net_id)
# Verify that the subnet got automatically deleted.
self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
subnet_id)
        # Since create_subnet adds the subnet to the delete list, and it is
        # actually deleted here - this would create an issue, hence remove
        # it from the list.
self.subnets.pop()
@test.attr(type='smoke')
@test.idempotent_id('d2d596e2-8e76-47a9-ac51-d4648009f4d3')
def test_create_delete_subnet_without_gateway(self):
self._create_verify_delete_subnet()
@test.attr(type='smoke')
@test.idempotent_id('9393b468-186d-496d-aa36-732348cd76e7')
def test_create_delete_subnet_with_gw(self):
self._create_verify_delete_subnet(
**self.subnet_dict(['gateway']))
@test.attr(type='smoke')
@test.idempotent_id('bec949c4-3147-4ba6-af5f-cd2306118404')
def test_create_delete_subnet_with_allocation_pools(self):
self._create_verify_delete_subnet(
**self.subnet_dict(['allocation_pools']))
@test.attr(type='smoke')
@test.idempotent_id('8217a149-0c6c-4cfb-93db-0486f707d13f')
def test_create_delete_subnet_with_gw_and_allocation_pools(self):
self._create_verify_delete_subnet(**self.subnet_dict(
['gateway', 'allocation_pools']))
@test.attr(type='smoke')
@test.idempotent_id('d830de0a-be47-468f-8f02-1fd996118289')
def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
self._create_verify_delete_subnet(
**self.subnet_dict(['host_routes', 'dns_nameservers']))
@test.attr(type='smoke')
@test.idempotent_id('94ce038d-ff0a-4a4c-a56b-09da3ca0b55d')
def test_create_delete_subnet_with_dhcp_enabled(self):
self._create_verify_delete_subnet(enable_dhcp=True)
@test.attr(type='smoke')
@test.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
def test_update_subnet_gw_dns_host_routes_dhcp(self):
network = self.create_network()
self.addCleanup(self._delete_network, network)
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
'dns_nameservers',
'allocation_pools']))
subnet_id = subnet['id']
new_gateway = str(netaddr.IPAddress(
self._subnet_data[self._ip_version]['gateway']) + 1)
# Verify subnet update
new_host_routes = self._subnet_data[self._ip_version][
'new_host_routes']
new_dns_nameservers = self._subnet_data[self._ip_version][
'new_dns_nameservers']
kwargs = {'host_routes': new_host_routes,
'dns_nameservers': new_dns_nameservers,
'gateway_ip': new_gateway, 'enable_dhcp': True}
new_name = "New_subnet"
body = self.client.update_subnet(subnet_id, name=new_name,
**kwargs)
updated_subnet = body['subnet']
kwargs['name'] = new_name
self.assertEqual(sorted(updated_subnet['dns_nameservers']),
sorted(kwargs['dns_nameservers']))
del subnet['dns_nameservers'], kwargs['dns_nameservers']
self._compare_resource_attrs(updated_subnet, kwargs)
@test.attr(type='smoke')
@test.idempotent_id('a4d9ec4c-0306-4111-a75c-db01a709030b')
def test_create_delete_subnet_all_attributes(self):
self._create_verify_delete_subnet(
enable_dhcp=True,
**self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
@test.attr(type='smoke')
@test.idempotent_id('af774677-42a9-4e4b-bb58-16fe6a5bc1ec')
def test_external_network_visibility(self):
"""Verifies user can see external networks but not subnets."""
body = self.client.list_networks(**{'router:external': True})
networks = [network['id'] for network in body['networks']]
self.assertNotEmpty(networks, "No external networks found")
nonexternal = [net for net in body['networks'] if
not net['router:external']]
self.assertEmpty(nonexternal, "Found non-external networks"
" in filtered list (%s)." % nonexternal)
self.assertIn(CONF.network.public_network_id, networks)
subnets_iter = (network['subnets'] for network in body['networks'])
        # subnets_iter is an iterable of per-network subnet-ID lists;
        # chaining flattens it into a single iterable of UUIDs
public_subnets_iter = itertools.chain(*subnets_iter)
body = self.client.list_subnets()
subnets = [sub['id'] for sub in body['subnets']
if sub['id'] in public_subnets_iter]
self.assertEmpty(subnets, "Public subnets visible")
class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
bulk network creation
bulk subnet creation
bulk port creation
list tenant's networks
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of cidr's from which smaller blocks
can be allocated for tenant networks
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant-network_cidr
"""
def _delete_networks(self, created_networks):
for n in created_networks:
self.client.delete_network(n['id'])
# Asserting that the networks are not found in the list after deletion
body = self.client.list_networks()
networks_list = [network['id'] for network in body['networks']]
for n in created_networks:
self.assertNotIn(n['id'], networks_list)
def _delete_subnets(self, created_subnets):
for n in created_subnets:
self.client.delete_subnet(n['id'])
# Asserting that the subnets are not found in the list after deletion
body = self.client.list_subnets()
subnets_list = [subnet['id'] for subnet in body['subnets']]
for n in created_subnets:
self.assertNotIn(n['id'], subnets_list)
def _delete_ports(self, created_ports):
for n in created_ports:
self.client.delete_port(n['id'])
# Asserting that the ports are not found in the list after deletion
body = self.client.list_ports()
ports_list = [port['id'] for port in body['ports']]
for n in created_ports:
self.assertNotIn(n['id'], ports_list)
@test.attr(type='smoke')
@test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
network_names = [data_utils.rand_name('network-'),
data_utils.rand_name('network-')]
body = self.client.create_bulk_network(network_names)
created_networks = body['networks']
self.addCleanup(self._delete_networks, created_networks)
# Asserting that the networks are found in the list after creation
body = self.client.list_networks()
networks_list = [network['id'] for network in body['networks']]
for n in created_networks:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], networks_list)
@test.attr(type='smoke')
@test.idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')
def test_bulk_create_delete_subnet(self):
networks = [self.create_network(), self.create_network()]
# Creates 2 subnets in one request
if self._ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
else:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
subnets_list = []
for i in range(len(names)):
p1 = {
'network_id': networks[i]['id'],
                'cidr': str(cidrs[i]),
'name': names[i],
'ip_version': self._ip_version
}
subnets_list.append(p1)
del subnets_list[1]['name']
body = self.client.create_bulk_subnet(subnets_list)
created_subnets = body['subnets']
self.addCleanup(self._delete_subnets, created_subnets)
# Asserting that the subnets are found in the list after creation
body = self.client.list_subnets()
subnets_list = [subnet['id'] for subnet in body['subnets']]
for n in created_subnets:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], subnets_list)
@test.attr(type='smoke')
@test.idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')
def test_bulk_create_delete_port(self):
networks = [self.create_network(), self.create_network()]
# Creates 2 ports in one request
names = [data_utils.rand_name('port-') for i in range(len(networks))]
port_list = []
state = [True, False]
for i in range(len(names)):
p1 = {
'network_id': networks[i]['id'],
'name': names[i],
'admin_state_up': state[i],
}
port_list.append(p1)
del port_list[1]['name']
body = self.client.create_bulk_port(port_list)
created_ports = body['ports']
self.addCleanup(self._delete_ports, created_ports)
# Asserting that the ports are found in the list after creation
body = self.client.list_ports()
ports_list = [port['id'] for port in body['ports']]
for n in created_ports:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], ports_list)
class BulkNetworkOpsIpV6TestJSON(BulkNetworkOpsTestJSON):
_ip_version = 6
class NetworksIpV6TestJSON(NetworksTestJSON):
_ip_version = 6
@test.attr(type='smoke')
@test.idempotent_id('e41a4888-65a6-418c-a095-f7c2ef4ad59a')
def test_create_delete_subnet_with_gw(self):
net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway = str(netaddr.IPAddress(net.first + 2))
name = data_utils.rand_name('network-')
network = self.create_network(network_name=name)
subnet = self.create_subnet(network, gateway)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway)
@test.attr(type='smoke')
@test.idempotent_id('ebb4fd95-524f-46af-83c1-0305b239338f')
def test_create_delete_subnet_with_default_gw(self):
net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway_ip = str(netaddr.IPAddress(net.first + 1))
name = data_utils.rand_name('network-')
network = self.create_network(network_name=name)
subnet = self.create_subnet(network)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway_ip)
@test.attr(type='smoke')
@test.idempotent_id('a9653883-b2a4-469b-8c3c-4518430a7e55')
def test_create_list_subnet_with_no_gw64_one_network(self):
name = data_utils.rand_name('network-')
network = self.create_network(name)
ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
subnet1 = self.create_subnet(network,
ip_version=6,
gateway=ipv6_gateway)
self.assertEqual(netaddr.IPNetwork(subnet1['cidr']).version, 6,
'The created subnet is not IPv6')
subnet2 = self.create_subnet(network,
gateway=None,
ip_version=4)
self.assertEqual(netaddr.IPNetwork(subnet2['cidr']).version, 4,
'The created subnet is not IPv4')
# Verifies Subnet GW is set in IPv6
self.assertEqual(subnet1['gateway_ip'], ipv6_gateway)
# Verifies Subnet GW is None in IPv4
self.assertEqual(subnet2['gateway_ip'], None)
# Verifies all 2 subnets in the same network
body = self.client.list_subnets()
subnets = [sub['id'] for sub in body['subnets']
if sub['network_id'] == network['id']]
test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
self.assertItemsEqual(subnets,
test_subnet_ids,
                              'Subnets are not in the same network')
class NetworksIpV6TestAttrs(NetworksIpV6TestJSON):
@classmethod
def resource_setup(cls):
if not CONF.network_feature_enabled.ipv6_subnet_attributes:
raise cls.skipException("IPv6 extended attributes for "
"subnets not available")
super(NetworksIpV6TestAttrs, cls).resource_setup()
@test.attr(type='smoke')
@test.idempotent_id('da40cd1b-a833-4354-9a85-cd9b8a3b74ca')
def test_create_delete_subnet_with_v6_attributes_stateful(self):
self._create_verify_delete_subnet(
gateway=self._subnet_data[self._ip_version]['gateway'],
ipv6_ra_mode='dhcpv6-stateful',
ipv6_address_mode='dhcpv6-stateful')
@test.attr(type='smoke')
@test.idempotent_id('176b030f-a923-4040-a755-9dc94329e60c')
def test_create_delete_subnet_with_v6_attributes_slaac(self):
self._create_verify_delete_subnet(
ipv6_ra_mode='slaac',
ipv6_address_mode='slaac')
@test.attr(type='smoke')
@test.idempotent_id('7d410310-8c86-4902-adf9-865d08e31adb')
def test_create_delete_subnet_with_v6_attributes_stateless(self):
self._create_verify_delete_subnet(
ipv6_ra_mode='dhcpv6-stateless',
ipv6_address_mode='dhcpv6-stateless')
def _test_delete_subnet_with_ports(self, mode):
"""Create subnet and delete it with existing ports"""
slaac_network = self.create_network()
subnet_slaac = self.create_subnet(slaac_network,
**{'ipv6_ra_mode': mode,
'ipv6_address_mode': mode})
port = self.create_port(slaac_network)
self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
self.client.delete_subnet(subnet_slaac['id'])
self.subnets.pop()
subnets = self.client.list_subnets()
subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
self.assertNotIn(subnet_slaac['id'], subnet_ids,
"Subnet wasn't deleted")
self.assertRaisesRegexp(
lib_exc.Conflict,
"There are one or more ports still in use on the network",
self.client.delete_network,
slaac_network['id'])
@test.attr(type='smoke')
@test.idempotent_id('88554555-ebf8-41ef-9300-4926d45e06e9')
def test_create_delete_slaac_subnet_with_ports(self):
"""Test deleting subnet with SLAAC ports
Create subnet with SLAAC, create ports in network
and then you shall be able to delete subnet without port
deletion. But you still can not delete the network.
"""
self._test_delete_subnet_with_ports("slaac")
@test.attr(type='smoke')
@test.idempotent_id('2de6ab5a-fcf0-4144-9813-f91a940291f1')
def test_create_delete_stateless_subnet_with_ports(self):
"""Test deleting subnet with DHCPv6 stateless ports
Create subnet with DHCPv6 stateless, create ports in network
and then you shall be able to delete subnet without port
deletion. But you still can not delete the network.
"""
self._test_delete_subnet_with_ports("dhcpv6-stateless")
|
|
import pymongo
import re
from pymongo.read_preferences import ReadPreference
from bson.dbref import DBRef
from mongoengine import signals
from mongoengine.common import _import_class
from mongoengine.base import (
DocumentMetaclass,
TopLevelDocumentMetaclass,
BaseDocument,
BaseDict,
BaseList,
EmbeddedDocumentList,
ALLOW_INHERITANCE,
get_document
)
from mongoengine.errors import InvalidQueryError, InvalidDocumentError
from mongoengine.queryset import (OperationError, NotUniqueError,
QuerySet, transform)
from mongoengine.connection import get_db, DEFAULT_CONNECTION_NAME
from mongoengine.context_managers import switch_db, switch_collection
__all__ = ('Document', 'EmbeddedDocument', 'DynamicDocument',
'DynamicEmbeddedDocument', 'OperationError',
'InvalidCollectionError', 'NotUniqueError', 'MapReduceDocument')
def includes_cls(fields):
""" Helper function used for ensuring and comparing indexes
"""
first_field = None
if len(fields):
if isinstance(fields[0], basestring):
first_field = fields[0]
elif isinstance(fields[0], (list, tuple)) and len(fields[0]):
first_field = fields[0][0]
return first_field == '_cls'
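# For example (illustrative inputs):
#     includes_cls([('_cls', 1), ('title', 1)])  # True: first field is _cls
#     includes_cls(['title', '-created'])        # False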
class InvalidCollectionError(Exception):
pass
class EmbeddedDocument(BaseDocument):
"""A :class:`~mongoengine.Document` that isn't stored in its own
collection. :class:`~mongoengine.EmbeddedDocument`\ s should be used as
fields on :class:`~mongoengine.Document`\ s through the
:class:`~mongoengine.EmbeddedDocumentField` field type.
A :class:`~mongoengine.EmbeddedDocument` subclass may be itself subclassed,
to create a specialised version of the embedded document that will be
stored in the same collection. To facilitate this behaviour a `_cls`
    field is added to documents (hidden through the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of
`_cls` set :attr:`allow_inheritance` to ``False`` in the :attr:`meta`
dictionary.
"""
__slots__ = ('_instance')
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
__metaclass__ = DocumentMetaclass
def __init__(self, *args, **kwargs):
super(EmbeddedDocument, self).__init__(*args, **kwargs)
self._instance = None
self._changed_fields = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._data == other._data
return False
def __ne__(self, other):
return not self.__eq__(other)
def save(self, *args, **kwargs):
self._instance.save(*args, **kwargs)
def reload(self, *args, **kwargs):
self._instance.reload(*args, **kwargs)
class Document(BaseDocument):
"""The base class used for defining the structure and properties of
collections of documents stored in MongoDB. Inherit from this class, and
add fields as class attributes to define a document's structure.
Individual documents may then be created by making instances of the
:class:`~mongoengine.Document` subclass.
By default, the MongoDB collection used to store documents created using a
:class:`~mongoengine.Document` subclass will be the name of the subclass
converted to lowercase. A different collection may be specified by
providing :attr:`collection` to the :attr:`meta` dictionary in the class
definition.
A :class:`~mongoengine.Document` subclass may be itself subclassed, to
create a specialised version of the document that will be stored in the
same collection. To facilitate this behaviour a `_cls`
    field is added to documents (hidden through the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of
`_cls` set :attr:`allow_inheritance` to ``False`` in the :attr:`meta`
dictionary.
A :class:`~mongoengine.Document` may use a **Capped Collection** by
specifying :attr:`max_documents` and :attr:`max_size` in the :attr:`meta`
dictionary. :attr:`max_documents` is the maximum number of documents that
is allowed to be stored in the collection, and :attr:`max_size` is the
maximum size of the collection in bytes. If :attr:`max_size` is not
specified and :attr:`max_documents` is, :attr:`max_size` defaults to
10000000 bytes (10MB).
Indexes may be created by specifying :attr:`indexes` in the :attr:`meta`
dictionary. The value should be a list of field names or tuples of field
names. Index direction may be specified by prefixing the field names with
a **+** or **-** sign.
Automatic index creation can be disabled by specifying
:attr:`auto_create_index` in the :attr:`meta` dictionary. If this is set to
False then indexes will not be created by MongoEngine. This is useful in
production systems where index creation is performed as part of a
deployment system.
By default, _cls will be added to the start of every index (that
doesn't contain a list) if allow_inheritance is True. This can be
disabled by either setting cls to False on the specific index or
by setting index_cls to False on the meta dictionary for the document.
By default, any extra attribute existing in stored data but not declared
in your model will raise a :class:`~mongoengine.FieldDoesNotExist` error.
This can be disabled by setting :attr:`strict` to ``False``
    in the :attr:`meta` dictionary.
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
__metaclass__ = TopLevelDocumentMetaclass
__slots__ = ('__objects')
def pk():
"""Primary key alias
"""
def fget(self):
return getattr(self, self._meta['id_field'])
def fset(self, value):
return setattr(self, self._meta['id_field'], value)
return property(fget, fset)
pk = pk()
@classmethod
def _get_db(cls):
"""Some Model using other db_alias"""
return get_db(cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME))
@classmethod
def _get_collection(cls):
"""Returns the collection for the document."""
# TODO: use new get_collection() with PyMongo3 ?
if not hasattr(cls, '_collection') or cls._collection is None:
db = cls._get_db()
collection_name = cls._get_collection_name()
# Create collection as a capped collection if specified
if cls._meta['max_size'] or cls._meta['max_documents']:
# Get max document limit and max byte size from meta
max_size = cls._meta['max_size'] or 10000000 # 10MB default
max_documents = cls._meta['max_documents']
if collection_name in db.collection_names():
cls._collection = db[collection_name]
# The collection already exists, check if its capped
# options match the specified capped options
options = cls._collection.options()
if options.get('max') != max_documents or \
options.get('size') != max_size:
msg = (('Cannot create collection "%s" as a capped '
'collection as it already exists')
% cls._collection)
raise InvalidCollectionError(msg)
else:
# Create the collection as a capped collection
opts = {'capped': True, 'size': max_size}
if max_documents:
opts['max'] = max_documents
cls._collection = db.create_collection(
collection_name, **opts
)
else:
cls._collection = db[collection_name]
if cls._meta.get('auto_create_index', True):
cls.ensure_indexes()
return cls._collection
def modify(self, query={}, **update):
"""Perform an atomic update of the document in the database and reload
the document object using updated version.
Returns True if the document has been updated or False if the document
in the database doesn't match the query.
        .. note:: All unsaved changes that have been made to the document are
rejected if the method returns True.
:param query: the update will be performed only if the document in the
database matches the query
:param update: Django-style update keyword arguments
"""
if self.pk is None:
raise InvalidDocumentError("The document does not have a primary key.")
id_field = self._meta["id_field"]
query = query.copy() if isinstance(query, dict) else query.to_query(self)
if id_field not in query:
query[id_field] = self.pk
elif query[id_field] != self.pk:
raise InvalidQueryError("Invalid document modify query: it must modify only this document.")
updated = self._qs(**query).modify(new=True, **update)
if updated is None:
return False
for field in self._fields_ordered:
setattr(self, field, self._reload(field, updated[field]))
self._changed_fields = updated._changed_fields
self._created = False
return True
def save(self, force_insert=False, validate=True, clean=True,
write_concern=None, cascade=None, cascade_kwargs=None,
_refs=None, save_condition=None, **kwargs):
"""Save the :class:`~mongoengine.Document` to the database. If the
document already exists, it will be updated, otherwise it will be
created.
:param force_insert: only try to create a new document, don't allow
updates of existing documents
:param validate: validates the document; set to ``False`` to skip.
:param clean: call the document clean method, requires `validate` to be
True.
:param write_concern: Extra keyword arguments are passed down to
:meth:`~pymongo.collection.Collection.save` OR
:meth:`~pymongo.collection.Collection.insert`
which will be used as options for the resultant
``getLastError`` command. For example,
``save(..., write_concern={w: 2, fsync: True}, ...)`` will
wait until at least two servers have recorded the write and
will force an fsync on the primary server.
:param cascade: Sets the flag for cascading saves. You can set a
default by setting "cascade" in the document __meta__
        :param cascade_kwargs: (optional) kwargs dictionary to be passed through
to cascading saves. Implies ``cascade=True``.
:param _refs: A list of processed references used in cascading saves
:param save_condition: only perform save if matching record in db
satisfies condition(s) (e.g., version number)
.. versionchanged:: 0.5
In existing documents it only saves changed fields using
set / unset. Saves are cascaded and any
:class:`~bson.dbref.DBRef` objects that have changes are
saved as well.
.. versionchanged:: 0.6
Added cascading saves
.. versionchanged:: 0.8
            Cascade saves are optional and default to False. If you want
            fine-grained control you can enable them per document by setting
            meta['cascade'] = True. Also you can pass different kwargs to
            the cascade save using cascade_kwargs, which overwrites the
            existing kwargs with custom values.
.. versionchanged:: 0.8.5
Optional save_condition that only overwrites existing documents
if the condition is satisfied in the current db record.
"""
signals.pre_save.send(self.__class__, document=self)
if validate:
self.validate(clean=clean)
if write_concern is None:
write_concern = {"w": 1}
doc = self.to_mongo()
created = ('_id' not in doc or self._created or force_insert)
signals.pre_save_post_validation.send(self.__class__, document=self,
created=created)
try:
collection = self._get_collection()
if self._meta.get('auto_create_index', True):
self.ensure_indexes()
if created:
if force_insert:
object_id = collection.insert(doc, **write_concern)
else:
object_id = collection.save(doc, **write_concern)
# In PyMongo 3.0, the save() call calls internally the _update() call
# but they forget to return the _id value passed back, therefore getting it back here
# Correct behaviour in 2.X and in 3.0.1+ versions
if not object_id and pymongo.version_tuple == (3, 0):
pk_as_mongo_obj = self._fields.get(self._meta['id_field']).to_mongo(self.pk)
object_id = self._qs.filter(pk=pk_as_mongo_obj).first() and \
self._qs.filter(pk=pk_as_mongo_obj).first().pk
else:
object_id = doc['_id']
updates, removals = self._delta()
# Need to add shard key to query, or you get an error
if save_condition is not None:
select_dict = transform.query(self.__class__,
**save_condition)
else:
select_dict = {}
select_dict['_id'] = object_id
shard_key = self.__class__._meta.get('shard_key', tuple())
for k in shard_key:
actual_key = self._db_field_map.get(k, k)
select_dict[actual_key] = doc[actual_key]
def is_new_object(last_error):
if last_error is not None:
updated = last_error.get("updatedExisting")
if updated is not None:
return not updated
return created
update_query = {}
if updates:
update_query["$set"] = updates
if removals:
update_query["$unset"] = removals
if updates or removals:
upsert = save_condition is None
last_error = collection.update(select_dict, update_query,
upsert=upsert, **write_concern)
created = is_new_object(last_error)
if cascade is None:
cascade = self._meta.get(
'cascade', False) or cascade_kwargs is not None
if cascade:
kwargs = {
"force_insert": force_insert,
"validate": validate,
"write_concern": write_concern,
"cascade": cascade
}
if cascade_kwargs: # Allow granular control over cascades
kwargs.update(cascade_kwargs)
kwargs['_refs'] = _refs
self.cascade_save(**kwargs)
except pymongo.errors.DuplicateKeyError, err:
message = u'Tried to save duplicate unique keys (%s)'
raise NotUniqueError(message % unicode(err))
except pymongo.errors.OperationFailure, err:
message = 'Could not save document (%s)'
if re.match('^E1100[01] duplicate key', unicode(err)):
# E11000 - duplicate key error index
# E11001 - duplicate key on update
message = u'Tried to save duplicate unique keys (%s)'
raise NotUniqueError(message % unicode(err))
raise OperationError(message % unicode(err))
id_field = self._meta['id_field']
if created or id_field not in self._meta.get('shard_key', []):
self[id_field] = self._fields[id_field].to_python(object_id)
signals.post_save.send(self.__class__, document=self, created=created)
self._clear_changed_fields()
self._created = False
return self
def cascade_save(self, *args, **kwargs):
"""Recursively saves any references /
generic references on an objects"""
_refs = kwargs.get('_refs', []) or []
ReferenceField = _import_class('ReferenceField')
GenericReferenceField = _import_class('GenericReferenceField')
for name, cls in self._fields.items():
if not isinstance(cls, (ReferenceField,
GenericReferenceField)):
continue
ref = self._data.get(name)
if not ref or isinstance(ref, DBRef):
continue
if not getattr(ref, '_changed_fields', True):
continue
ref_id = "%s,%s" % (ref.__class__.__name__, str(ref._data))
if ref and ref_id not in _refs:
_refs.append(ref_id)
kwargs["_refs"] = _refs
ref.save(**kwargs)
ref._changed_fields = []
@property
def _qs(self):
"""
Returns the queryset to use for updating / reloading / deletions
"""
if not hasattr(self, '__objects'):
self.__objects = QuerySet(self, self._get_collection())
return self.__objects
@property
def _object_key(self):
"""Dict to identify object in collection
"""
select_dict = {'pk': self.pk}
shard_key = self.__class__._meta.get('shard_key', tuple())
for k in shard_key:
select_dict[k] = getattr(self, k)
return select_dict
def update(self, **kwargs):
"""Performs an update on the :class:`~mongoengine.Document`
A convenience wrapper to :meth:`~mongoengine.QuerySet.update`.
Raises :class:`OperationError` if called on an object that has not yet
been saved.
"""
if not self.pk:
if kwargs.get('upsert', False):
query = self.to_mongo()
if "_cls" in query:
del(query["_cls"])
return self._qs.filter(**query).update_one(**kwargs)
else:
raise OperationError(
'attempt to update a document not yet saved')
# Need to add shard key to query, or you get an error
return self._qs.filter(**self._object_key).update_one(**kwargs)
def delete(self, **write_concern):
"""Delete the :class:`~mongoengine.Document` from the database. This
will only take effect if the document has been previously saved.
:param write_concern: Extra keyword arguments are passed down which
will be used as options for the resultant
``getLastError`` command. For example,
``save(..., write_concern={w: 2, fsync: True}, ...)`` will
wait until at least two servers have recorded the write and
will force an fsync on the primary server.
"""
signals.pre_delete.send(self.__class__, document=self)
# Delete FileFields separately
FileField = _import_class('FileField')
for name, field in self._fields.iteritems():
if isinstance(field, FileField):
getattr(self, name).delete()
try:
self._qs.filter(
**self._object_key).delete(write_concern=write_concern, _from_doc_delete=True)
except pymongo.errors.OperationFailure, err:
message = u'Could not delete document (%s)' % err.message
raise OperationError(message)
signals.post_delete.send(self.__class__, document=self)
def switch_db(self, db_alias):
"""
Temporarily switch the database for a document instance.
Only really useful for archiving off data and calling `save()`::
user = User.objects.get(id=user_id)
user.switch_db('archive-db')
user.save()
:param str db_alias: The database alias to use for saving the document
.. seealso::
Use :class:`~mongoengine.context_managers.switch_collection`
if you need to read from another collection
"""
with switch_db(self.__class__, db_alias) as cls:
collection = cls._get_collection()
db = cls._get_db()
self._get_collection = lambda: collection
self._get_db = lambda: db
self._collection = collection
self._created = True
self.__objects = self._qs
self.__objects._collection_obj = collection
return self
def switch_collection(self, collection_name):
"""
Temporarily switch the collection for a document instance.
Only really useful for archiving off data and calling `save()`::
user = User.objects.get(id=user_id)
user.switch_collection('old-users')
user.save()
:param str collection_name: The database alias to use for saving the
document
.. seealso::
Use :class:`~mongoengine.context_managers.switch_db`
if you need to read from another database
"""
with switch_collection(self.__class__, collection_name) as cls:
collection = cls._get_collection()
self._get_collection = lambda: collection
self._collection = collection
self._created = True
self.__objects = self._qs
self.__objects._collection_obj = collection
return self
def select_related(self, max_depth=1):
"""Handles dereferencing of :class:`~bson.dbref.DBRef` objects to
a maximum depth in order to cut down the number of queries to MongoDB.
.. versionadded:: 0.5
"""
DeReference = _import_class('DeReference')
DeReference()([self], max_depth + 1)
return self
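# Hedged sketch of ``select_related``: called on a fetched document it
# dereferences any DBRefs in bulk up to ``max_depth`` (``BlogPost`` and
# ``author`` are assumed, illustrative names).
#
#     post = BlogPost.objects.first().select_related(max_depth=2)
#     post.author.name  # already dereferenced, no per-attribute query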
def reload(self, *fields, **kwargs):
"""Reloads all attributes from the database.
:param fields: (optional) args list of fields to reload
:param max_depth: (optional) depth of dereferencing to follow
.. versionadded:: 0.1.2
.. versionchanged:: 0.6 Now chainable
.. versionchanged:: 0.9 Can provide specific fields to reload
"""
max_depth = 1
if fields and isinstance(fields[0], int):
max_depth = fields[0]
fields = fields[1:]
elif "max_depth" in kwargs:
max_depth = kwargs["max_depth"]
if not self.pk:
raise self.DoesNotExist("Document does not exist")
obj = self._qs.read_preference(ReadPreference.PRIMARY).filter(
**self._object_key).only(*fields).limit(1
).select_related(max_depth=max_depth)
if obj:
obj = obj[0]
else:
raise self.DoesNotExist("Document does not exist")
for field in self._fields_ordered:
if not fields or field in fields:
try:
setattr(self, field, self._reload(field, obj[field]))
except KeyError:
# If field is removed from the database while the object
# is in memory, a reload would cause a KeyError
# i.e. obj.update(unset__field=1) followed by obj.reload()
delattr(self, field)
self._changed_fields = obj._changed_fields
self._created = False
return self
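# Usage sketch for ``reload`` (hypothetical ``user`` instance): reload all
# fields, only selected fields, or bound the dereference depth by passing an
# int as the first positional argument or ``max_depth``.
#
#     user.reload()                      # all fields
#     user.reload('email', 'last_seen')  # only these fields
#     user.reload(2, 'friends')          # max_depth=2, then the field list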
def _reload(self, key, value):
"""Used by :meth:`~mongoengine.Document.reload` to ensure the
correct instance is linked to self.
"""
if isinstance(value, BaseDict):
value = [(k, self._reload(k, v)) for k, v in value.items()]
value = BaseDict(value, self, key)
elif isinstance(value, EmbeddedDocumentList):
value = [self._reload(key, v) for v in value]
value = EmbeddedDocumentList(value, self, key)
elif isinstance(value, BaseList):
value = [self._reload(key, v) for v in value]
value = BaseList(value, self, key)
elif isinstance(value, (EmbeddedDocument, DynamicEmbeddedDocument)):
value._instance = None
value._changed_fields = []
return value
def to_dbref(self):
"""Returns an instance of :class:`~bson.dbref.DBRef` useful in
`__raw__` queries."""
if not self.pk:
msg = "Only saved documents can have a valid dbref"
raise OperationError(msg)
return DBRef(self.__class__._get_collection_name(), self.pk)
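# Sketch of how the result of ``to_dbref`` can feed a ``__raw__`` query
# (``Employee`` and its ``manager`` field are hypothetical).
#
#     boss_ref = boss.to_dbref()
#     Employee.objects(__raw__={'manager': boss_ref})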
@classmethod
def register_delete_rule(cls, document_cls, field_name, rule):
"""This method registers the delete rules to apply when removing this
object.
"""
classes = [get_document(class_name)
for class_name in cls._subclasses
if class_name != cls.__name__] + [cls]
documents = [get_document(class_name)
for class_name in document_cls._subclasses
if class_name != document_cls.__name__] + [document_cls]
for cls in classes:
for document_cls in documents:
delete_rules = cls._meta.get('delete_rules') or {}
delete_rules[(document_cls, field_name)] = rule
cls._meta['delete_rules'] = delete_rules
@classmethod
def drop_collection(cls):
"""Drops the entire collection associated with this
:class:`~mongoengine.Document` type from the database.
"""
cls._collection = None
db = cls._get_db()
db.drop_collection(cls._get_collection_name())
@classmethod
def ensure_index(cls, key_or_list, drop_dups=False, background=False,
**kwargs):
"""Ensure that the given indexes are in place.
:param key_or_list: a single index key or a list of index keys (to
construct a multi-field index); keys may be prefixed with a **+**
or a **-** to determine the index ordering
"""
index_spec = cls._build_index_spec(key_or_list)
index_spec = index_spec.copy()
fields = index_spec.pop('fields')
index_spec['drop_dups'] = drop_dups
index_spec['background'] = background
index_spec.update(kwargs)
return cls._get_collection().ensure_index(fields, **index_spec)
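# Illustrative sketch for ``ensure_index`` (hypothetical ``Article``
# document): a '+'/'-' prefix on each key selects ascending/descending order
# in the compound index.
#
#     Article.ensure_index(['+category', '-published'])
#     Article.ensure_index('title', background=True)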
@classmethod
def ensure_indexes(cls):
"""Checks the document meta data and ensures all the indexes exist.
Global defaults can be set in the meta - see :doc:`guide/defining-documents`
.. note:: You can disable automatic index creation by setting
`auto_create_index` to False in the documents meta data
"""
background = cls._meta.get('index_background', False)
drop_dups = cls._meta.get('index_drop_dups', False)
index_opts = cls._meta.get('index_opts') or {}
index_cls = cls._meta.get('index_cls', True)
collection = cls._get_collection()
# 746: when connection is via mongos, the read preference is not necessarily an indication that
# this code runs on a secondary
if not collection.is_mongos and collection.read_preference > 1:
return
# determine if an index which we are creating includes
# _cls as its first field; if so, we can avoid creating
# an extra index on _cls, as mongodb will use the existing
# index to service queries against _cls
cls_indexed = False
# Ensure document-defined indexes are created
if cls._meta['index_specs']:
index_spec = cls._meta['index_specs']
for spec in index_spec:
spec = spec.copy()
fields = spec.pop('fields')
cls_indexed = cls_indexed or includes_cls(fields)
opts = index_opts.copy()
opts.update(spec)
# we shouldn't pass 'cls' to the collection.ensureIndex options
# because of https://jira.mongodb.org/browse/SERVER-769
if 'cls' in opts:
del opts['cls']
collection.ensure_index(fields, background=background,
drop_dups=drop_dups, **opts)
# If _cls is being used (for polymorphism), it needs an index,
# only if another index doesn't begin with _cls
if (index_cls and not cls_indexed and
cls._meta.get('allow_inheritance', ALLOW_INHERITANCE) is True):
# we shouldn't pass 'cls' to the collection.ensureIndex options
# because of https://jira.mongodb.org/browse/SERVER-769
if 'cls' in index_opts:
del index_opts['cls']
collection.ensure_index('_cls', background=background,
**index_opts)
@classmethod
def list_indexes(cls, go_up=True, go_down=True):
""" Lists all of the indexes that should be created for given
collection. It includes all the indexes from super- and sub-classes.
"""
if cls._meta.get('abstract'):
return []
# get all the base classes, subclasses and siblings
classes = []
def get_classes(cls):
if (cls not in classes and
isinstance(cls, TopLevelDocumentMetaclass)):
classes.append(cls)
for base_cls in cls.__bases__:
if (isinstance(base_cls, TopLevelDocumentMetaclass) and
base_cls != Document and
not base_cls._meta.get('abstract') and
base_cls._get_collection().full_name == cls._get_collection().full_name and
base_cls not in classes):
classes.append(base_cls)
get_classes(base_cls)
for subclass in cls.__subclasses__():
if (isinstance(subclass, TopLevelDocumentMetaclass) and
subclass._get_collection().full_name == cls._get_collection().full_name and
subclass not in classes):
classes.append(subclass)
get_classes(subclass)
get_classes(cls)
# get the indexes spec for all of the gathered classes
def get_indexes_spec(cls):
indexes = []
if cls._meta['index_specs']:
index_spec = cls._meta['index_specs']
for spec in index_spec:
spec = spec.copy()
fields = spec.pop('fields')
indexes.append(fields)
return indexes
indexes = []
for cls in classes:
for index in get_indexes_spec(cls):
if index not in indexes:
indexes.append(index)
# finish up by appending { '_id': 1 } and { '_cls': 1 }, if needed
if [(u'_id', 1)] not in indexes:
indexes.append([(u'_id', 1)])
if (cls._meta.get('index_cls', True) and
cls._meta.get('allow_inheritance', ALLOW_INHERITANCE) is True):
indexes.append([(u'_cls', 1)])
return indexes
@classmethod
def compare_indexes(cls):
""" Compares the indexes defined in MongoEngine with the ones existing
in the database. Returns any missing/extra indexes.
"""
required = cls.list_indexes()
existing = [info['key']
for info in cls._get_collection().index_information().values()]
missing = [index for index in required if index not in existing]
extra = [index for index in existing if index not in required]
# if { _cls: 1 } is missing, make sure it's *really* necessary
if [(u'_cls', 1)] in missing:
cls_obsolete = False
for index in existing:
if includes_cls(index) and index not in extra:
cls_obsolete = True
break
if cls_obsolete:
missing.remove([(u'_cls', 1)])
return {'missing': missing, 'extra': extra}
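# Rough usage sketch for ``compare_indexes`` (``Article`` is a hypothetical
# document class): the returned dict holds pymongo-style key specs that are
# declared but absent ('missing') or present but undeclared ('extra').
#
#     diff = Article.compare_indexes()
#     if diff['missing'] or diff['extra']:
#         print("indexes out of sync: %s" % diff)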
class DynamicDocument(Document):
"""A Dynamic Document class allowing flexible, expandable and uncontrolled
schemas. As a :class:`~mongoengine.Document` subclass, acts in the same
way as an ordinary document but has expando style properties. Any data
passed or set against the :class:`~mongoengine.DynamicDocument` that is
not a field is automatically converted into a
:class:`~mongoengine.fields.DynamicField` and data can be attributed to that
field.
.. note::
There is one caveat on Dynamic Documents: fields cannot start with `_`
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
__metaclass__ = TopLevelDocumentMetaclass
_dynamic = True
def __delattr__(self, *args, **kwargs):
"""Deletes the attribute by setting to None and allowing _delta to unset
it"""
field_name = args[0]
if field_name in self._dynamic_fields:
setattr(self, field_name, None)
else:
super(DynamicDocument, self).__delattr__(*args, **kwargs)
class DynamicEmbeddedDocument(EmbeddedDocument):
"""A Dynamic Embedded Document class allowing flexible, expandable and
uncontrolled schemas. See :class:`~mongoengine.DynamicDocument` for more
information about dynamic documents.
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
__metaclass__ = DocumentMetaclass
_dynamic = True
def __delattr__(self, *args, **kwargs):
"""Deletes the attribute by setting to None and allowing _delta to unset
it"""
field_name = args[0]
if field_name in self._fields:
default = self._fields[field_name].default
if callable(default):
default = default()
setattr(self, field_name, default)
else:
setattr(self, field_name, None)
class MapReduceDocument(object):
"""A document returned from a map/reduce query.
:param collection: An instance of :class:`~pymongo.Collection`
:param key: Document/result key, often an instance of
:class:`~bson.objectid.ObjectId`. If supplied as
an ``ObjectId`` found in the given ``collection``,
the object can be accessed via the ``object`` property.
:param value: The result(s) for this key.
.. versionadded:: 0.3
"""
def __init__(self, document, collection, key, value):
self._document = document
self._collection = collection
self.key = key
self.value = value
@property
def object(self):
"""Lazy-load the object referenced by ``self.key``. ``self.key``
should be the ``primary_key``.
"""
id_field = self._document()._meta['id_field']
id_field_type = type(id_field)
if not isinstance(self.key, id_field_type):
try:
self.key = id_field_type(self.key)
except:
raise Exception("Could not cast key as %s" %
id_field_type.__name__)
if not hasattr(self, "_key_object"):
self._key_object = self._document.objects.with_id(self.key)
return self._key_object
return self._key_object
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from saharaclient.api import base as api_base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project.data_processing. \
utils import helpers as helpers
from openstack_dashboard.dashboards.project.data_processing. \
utils import anti_affinity as aa
import openstack_dashboard.dashboards.project.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
class SelectPluginAction(workflows.Action):
hidden_create_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
def __init__(self, request, *args, **kwargs):
super(SelectPluginAction, self).__init__(request, *args, **kwargs)
try:
plugins = saharaclient.plugin_list(request)
except Exception:
plugins = []
exceptions.handle(request,
_("Unable to fetch plugin list."))
plugin_choices = [(plugin.name, plugin.title) for plugin in plugins]
self.fields["plugin_name"] = forms.ChoiceField(
label=_("Plugin name"),
choices=plugin_choices,
widget=forms.Select(attrs={"class": "plugin_name_choice"}))
for plugin in plugins:
field_name = plugin.name + "_version"
choice_field = forms.ChoiceField(
label=_("Version"),
choices=[(version, version) for version in plugin.versions],
widget=forms.Select(
attrs={"class": "plugin_version_choice "
+ field_name + "_choice"})
)
self.fields[field_name] = choice_field
class Meta(object):
name = _("Select plugin and hadoop version for cluster template")
help_text_template = ("project/data_processing.cluster_templates/"
"_create_general_help.html")
class SelectPlugin(workflows.Step):
action_class = SelectPluginAction
class CreateClusterTemplate(workflows.Workflow):
slug = "create_cluster_template"
name = _("Create Cluster Template")
finalize_button_name = _("Next")
success_message = _("Created")
failure_message = _("Could not create")
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (SelectPlugin,)
class GeneralConfigAction(workflows.Action):
hidden_configure_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_configure_field"}))
hidden_to_delete_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_to_delete_field"}))
cluster_template_name = forms.CharField(label=_("Template Name"))
description = forms.CharField(label=_("Description"),
required=False,
widget=forms.Textarea(attrs={'rows': 4}))
anti_affinity = aa.anti_affinity_field()
def __init__(self, request, *args, **kwargs):
super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
self.fields["plugin_name"] = forms.CharField(
widget=forms.HiddenInput(),
initial=plugin
)
self.fields["hadoop_version"] = forms.CharField(
widget=forms.HiddenInput(),
initial=hadoop_version
)
populate_anti_affinity_choices = aa.populate_anti_affinity_choices
def get_help_text(self):
extra = dict()
plugin, hadoop_version = whelpers\
.get_plugin_and_hadoop_version(self.request)
extra["plugin_name"] = plugin
extra["hadoop_version"] = hadoop_version
return super(GeneralConfigAction, self).get_help_text(extra)
def clean(self):
cleaned_data = super(GeneralConfigAction, self).clean()
if cleaned_data.get("hidden_configure_field", None) \
== "create_nodegroup":
self._errors = dict()
return cleaned_data
class Meta(object):
name = _("Details")
help_text_template = ("project/data_processing.cluster_templates/"
"_configure_general_help.html")
class GeneralConfig(workflows.Step):
action_class = GeneralConfigAction
contributes = ("hidden_configure_field", )
def contribute(self, data, context):
for k, v in data.items():
context["general_" + k] = v
post = self.workflow.request.POST
context['anti_affinity_info'] = post.getlist("anti_affinity")
return context
class ConfigureNodegroupsAction(workflows.Action):
hidden_nodegroups_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_nodegroups_field"}))
forms_ids = forms.CharField(
required=False,
widget=forms.HiddenInput())
def __init__(self, request, *args, **kwargs):
super(ConfigureNodegroupsAction, self). \
__init__(request, *args, **kwargs)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
self.templates = saharaclient.nodegroup_template_find(
request, plugin_name=plugin, hadoop_version=hadoop_version)
deletable = request.REQUEST.get("deletable", dict())
if 'forms_ids' in request.POST:
self.groups = []
for id in json.loads(request.POST['forms_ids']):
group_name = "group_name_" + str(id)
template_id = "template_id_" + str(id)
count = "count_" + str(id)
serialized = "serialized_" + str(id)
self.groups.append({"name": request.POST[group_name],
"template_id": request.POST[template_id],
"count": request.POST[count],
"id": id,
"deletable": deletable.get(
request.POST[group_name], "true"),
"serialized": request.POST[serialized]})
whelpers.build_node_group_fields(self,
group_name,
template_id,
count,
serialized)
def clean(self):
cleaned_data = super(ConfigureNodegroupsAction, self).clean()
if cleaned_data.get("hidden_nodegroups_field", None) \
== "create_nodegroup":
self._errors = dict()
return cleaned_data
class Meta(object):
name = _("Node Groups")
class ConfigureNodegroups(workflows.Step):
action_class = ConfigureNodegroupsAction
contributes = ("hidden_nodegroups_field", )
template_name = ("project/data_processing.cluster_templates/"
"cluster_node_groups_template.html")
def contribute(self, data, context):
for k, v in data.items():
context["ng_" + k] = v
return context
class ConfigureClusterTemplate(whelpers.ServiceParametersWorkflow,
whelpers.StatusFormatMixin):
slug = "configure_cluster_template"
name = _("Create Cluster Template")
finalize_button_name = _("Create")
success_message = _("Created Cluster Template %s")
name_property = "general_cluster_template_name"
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (GeneralConfig,
ConfigureNodegroups)
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
ConfigureClusterTemplate._cls_registry = set([])
hlps = helpers.Helpers(request)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
general_parameters = hlps.get_cluster_general_configs(
plugin,
hadoop_version)
service_parameters = hlps.get_targeted_cluster_configs(
plugin,
hadoop_version)
self._populate_tabs(general_parameters, service_parameters)
super(ConfigureClusterTemplate, self).__init__(request,
context_seed,
entry_point,
*args, **kwargs)
def is_valid(self):
steps_valid = True
for step in self.steps:
if not step.action.is_valid():
steps_valid = False
step.has_errors = True
errors_fields = list(step.action.errors.keys())
step.action.errors_fields = errors_fields
if not steps_valid:
return steps_valid
return self.validate(self.context)
def handle(self, request, context):
try:
node_groups = []
configs_dict = whelpers.parse_configs_from_context(context,
self.defaults)
ids = json.loads(context['ng_forms_ids'])
for id in ids:
name = context['ng_group_name_' + str(id)]
template_id = context['ng_template_id_' + str(id)]
count = context['ng_count_' + str(id)]
raw_ng = context.get("ng_serialized_" + str(id))
if raw_ng and raw_ng != 'null':
ng = json.loads(base64.urlsafe_b64decode(str(raw_ng)))
else:
ng = dict()
ng["name"] = name
ng["count"] = count
if template_id and template_id != u'None':
ng["node_group_template_id"] = template_id
node_groups.append(ng)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
# TODO(nkonovalov): Fix client to support default_image_id
saharaclient.cluster_template_create(
request,
context["general_cluster_template_name"],
plugin,
hadoop_version,
context["general_description"],
configs_dict,
node_groups,
context["anti_affinity_info"],
)
return True
except api_base.APIException as e:
self.error_description = str(e)
return False
except Exception:
exceptions.handle(request,
_("Cluster template creation failed"))
return False
|
|
import time, glob, os
from itertools import cycle
import sha
from mako import exceptions
from mako.template import Template
from mako.lookup import TemplateLookup
from galaxy.web.base.controller import *
try:
import pkg_resources
pkg_resources.require("GeneTrack")
import atlas
from atlas import sql
from atlas import hdf
from atlas import util as atlas_utils
from atlas.web import formlib, feature_query, feature_filter
from atlas.web import label_cache as atlas_label_cache
from atlas.plotting.const import *
from atlas.plotting.tracks import prefab
from atlas.plotting.tracks import chart
from atlas.plotting import tracks
except Exception, exc:
raise ControllerUnavailable("GeneTrack could not import a required dependency: %s" % str(exc))
pkg_resources.require( "Paste" )
import paste.httpexceptions
# Database helpers
SHOW_LABEL_LIMIT = 10000
def list_labels(session):
"""
Returns a list of labels that will be plotted in order.
"""
labels = sql.Label
query = session.query(labels).order_by("-id")
return query
def open_databases( conf ):
"""
A helper function that returns handles to the hdf and sql databases
"""
db = hdf.hdf_open( conf.HDF_DATABASE, mode='r' )
session = sql.get_session( conf.SQL_URI )
return db, session
def hdf_query(db, name, param, autosize=False ):
"""
Schema specific hdf query.
Note that it returns data as columns, not rows.
"""
if not hdf.has_node(db=db, name=name):
atlas.warn( 'missing label %s' % name )
return [], [], [], []
data = hdf.GroupData( db=db, name=name)
istart, iend = data.get_indices(label=param.chrom, start=param.start, stop=param.end)
table = data.get_table(label=param.chrom)
if autosize:
# attempts to reduce the number of points
size = len( table.cols.ix[istart:iend] )
step = max( [1, size/1200] )
else:
step = 1
ix = table.cols.ix[istart:iend:step].tolist()
wx = table.cols.wx[istart:iend:step].tolist()
cx = table.cols.cx[istart:iend:step].tolist()
ax = table.cols.ax[istart:iend:step].tolist()
return ix, wx, cx, ax
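# Hedged sketch of unpacking the column-oriented result of ``hdf_query``
# (``db``, ``param`` and the label name are assumed to already exist);
# ``build_tracks`` below treats the three value columns as the plus-, minus-
# and combined-strand series.
#
#     ix, wx, cx, ax = hdf_query(db=db, name=data_label, param=param)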
# Chart helpers
def build_tracks( param, conf, data_label, fit_label, pred_label, strand, show=False ):
"""
Builds tracks
"""
# gets all the labels for a fast lookup
label_cache = atlas_label_cache( conf )
# get database handles for hdf and sql
db, session = open_databases( conf )
# fetching x and y coordinates for bar and fit (line) for
# each strand plus (p), minus (m), all (a)
bix, bpy, bmy, bay = hdf_query( db=db, name=data_label, param=param )
fix, fpy, fmy, fay = hdf_query( db=db, name=fit_label, param=param )
# close the hdf database
db.close()
# get all features within the range
all = feature_query( session=session, param=param )
# draws the barchart and the nucleosome chart below it
if strand == 'composite':
bar = prefab.composite_bartrack( fix=fix, fay=fay, bix=bix, bay=bay, param=param)
else:
bar = prefab.twostrand_bartrack( fix=fix, fmy=fmy, fpy=fpy, bix=bix, bmy=bmy, bpy=bpy, param=param)
charts = list()
charts.append( bar )
return charts
def feature_chart(param=None, session=None, label=None, label_dict={}, color=cycle( [LIGHT, WHITE] ) ):
all = feature_filter(feature_query(session=session, param=param), name=label, kdict=label_dict)
flipped = []
for feature in all:
if feature.strand == "-":
feature.start, feature.end = feature.end, feature.start
flipped.append(feature)
opts = track_options(
xscale=param.xscale, w=param.width, fgColor=PURPLE,
show_labels=param.show_labels, ylabel=str(label),
bgColor=color.next()
)
return [
tracks.split_tracks(features=flipped, options=opts, split=param.show_labels, track_type='vector')
]
def consolidate_charts( charts, param ):
# create the multiplot
opt = chart_options( w=param.width )
multi = chart.MultiChart(options=opt, charts=charts)
return multi
# SETUP Track Builders
import functools
def twostrand_tracks( param=None, conf=None ):
return build_tracks( data_label=conf.LABEL, fit_label=conf.FIT_LABEL, pred_label=conf.PRED_LABEL, param=param, conf=conf, strand='twostrand')
def composite_tracks( param=None, conf=None ):
return build_tracks( data_label=conf.LABEL, fit_label=conf.FIT_LABEL, pred_label=conf.PRED_LABEL, param=param, conf=conf, strand='composite')
class BaseConf( object ):
"""
Fake web_conf for atlas.
"""
IMAGE_DIR = "static/genetrack/plots/"
LEVELS = [str(x) for x in [ 50, 100, 250, 500, 1000, 2500, 5000, 10000, 20000, 50000, 100000, 200000 ]]
ZOOM_LEVELS = zip(LEVELS, LEVELS)
PLOT_SETUP = [
('comp-id', 'Composite' , 'genetrack/index.html', composite_tracks ),
('two-id' , 'Two Strand', 'genetrack/index.html', twostrand_tracks ),
]
PLOT_CHOICES = [ (id, name) for (id, name, page, func) in PLOT_SETUP ]
PLOT_MAPPER = dict( [ (id, (page, func)) for (id, name, page, func) in PLOT_SETUP ] )
def __init__(self, **kwds):
for key,value in kwds.items():
setattr( self, key, value)
class WebRoot(BaseController):
@web.expose
def search(self, trans, word='', dataset_id=None, submit=''):
"""
Default search page
"""
data = trans.app.model.HistoryDatasetAssociation.get( dataset_id )
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
# the main configuration file
conf = BaseConf(
TITLE = "<i>%s</i>: %s" % (data.metadata.dbkey, data.metadata.label),
HDF_DATABASE = os.path.join( data.extra_files_path, data.metadata.hdf ),
SQL_URI = "sqlite:///%s" % os.path.join( data.extra_files_path, data.metadata.sqlite ),
LABEL = data.metadata.label,
FIT_LABEL = "%s-SIGMA-%d" % (data.metadata.label, 20),
PRED_LABEL = "PRED-%s-SIGMA-%d" % (data.metadata.label, 20),
)
param = atlas.Param( word=word )
# search for a given
try:
session = sql.get_session( conf.SQL_URI )
except:
return trans.fill_template_mako('genetrack/invalid.html', dataset_id=dataset_id)
if param.word:
def search_query( word, text ):
query = session.query(sql.Feature).filter( "name LIKE :word or freetext LIKE :text" ).params(word=word, text=text)
query = list(query[:20])
return query
# a little heuristics to match most likely target
targets = [
(param.word+'%', 'No match'), # match beginning
('%'+param.word+'%', 'No match'), # match name anywhere
('%'+param.word+'%', '%'+param.word+'%'), # match json anywhere
]
for word, text in targets:
query = search_query( word=word, text=text)
if query:
break
else:
query = []
return trans.fill_template_mako('genetrack/search.html', param=param, query=query, dataset_id=dataset_id)
@web.expose
def index(self, trans, dataset_id=None, **kwds):
"""
Main request handler
"""
color = cycle( [LIGHT, WHITE] )
data = trans.app.model.HistoryDatasetAssociation.get( dataset_id )
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
# the main configuration file
conf = BaseConf(
TITLE = "<i>%s</i>: %s" % (data.metadata.dbkey, data.metadata.label),
HDF_DATABASE = os.path.join( data.extra_files_path, data.metadata.hdf ),
SQL_URI = "sqlite:///%s" % os.path.join( data.extra_files_path, data.metadata.sqlite ),
LABEL = data.metadata.label,
FIT_LABEL = "%s-SIGMA-%d" % (data.metadata.label, 20),
PRED_LABEL = "PRED-%s-SIGMA-%d" % (data.metadata.label, 20),
)
try:
session = sql.get_session( conf.SQL_URI )
except:
return trans.fill_template_mako('genetrack/invalid.html', dataset_id=dataset_id)
if os.path.exists( conf.HDF_DATABASE ):
db = hdf.hdf_open( conf.HDF_DATABASE, mode='r' )
conf.CHROM_FIELDS = [(x,x) for x in hdf.GroupData(db=db, name=conf.LABEL).labels]
db.close()
else:
query = session.execute(sql.select([sql.feature_table.c.chrom]).distinct())
conf.CHROM_FIELDS = [(x.chrom,x.chrom) for x in query]
# generate a new form based on the configuration
form = formlib.main_form( conf )
# clear the tempdir every once in a while
atlas_utils.clear_tempdir( dir=conf.IMAGE_DIR, days=1, chance=10)
incoming = form.defaults()
incoming.update( kwds )
# manage the zoom and pan requests
incoming = formlib.zoom_change( kdict=incoming, levels=conf.LEVELS)
incoming = formlib.pan_view( kdict=incoming )
# process the form
param = atlas.Param( **incoming )
form.process( incoming )
if kwds and form.isSuccessful():
# adds the successful parameters
param.update( form.values() )
# if it was a search word not a number go to search page
try:
center = int( param.feature )
except ValueError:
# go and search for these
return trans.response.send_redirect( web.url_for( controller='genetrack', action='search', word=param.feature, dataset_id=dataset_id ) )
param.width = min( [2000, int(param.img_size)] )
param.xscale = [ param.start, param.end ]
param.show_labels = ( param.end - param.start ) <= SHOW_LABEL_LIMIT
# get the template and the function used to generate the tracks
tmpl_name, track_maker = conf.PLOT_MAPPER[param.plot]
# check against a hash, display an image that already exists if it was previously created.
hash = sha.new()
hash.update(str(dataset_id))
for key in sorted(kwds.keys()):
hash.update(str(kwds[key]))
fname = "%s.png" % hash.hexdigest()
fpath = os.path.join(conf.IMAGE_DIR, fname)
charts = []
param.fname = fname
# The SHA1 hash should uniquely identify the query string that created the plot...
if os.path.exists(fpath):
os.utime(fpath, (time.time(), time.time()))
return trans.fill_template_mako(tmpl_name, conf=conf, form=form, param=param, dataset_id=dataset_id)
# If the hashed filename doesn't exist, create it.
if track_maker is not None and os.path.exists( conf.HDF_DATABASE ):
# generate the fit track
charts = track_maker( param=param, conf=conf )
for label in list_labels( session ):
charts.extend( feature_chart(param=param, session=session, label=label.name, label_dict={label.name:label.id}, color=color))
track_chart = consolidate_charts( charts, param )
track_chart.save(fname=fpath)
return trans.fill_template_mako(tmpl_name, conf=conf, form=form, param=param, dataset_id=dataset_id)
|
|
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
import inspect
import textwrap
from sympy.core.compatibility import (exec_, is_sequence, iterable,
NotIterable, string_types, range, builtins)
from sympy.utilities.decorator import doctest_depends_on
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
TENSORFLOW = {}
SYMPY = {}
NUMEXPR = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
TENSORFLOW_DEFAULT = {}
SYMPY_DEFAULT = {}
NUMEXPR_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Mod": "mod",
"oo": "inf",
"re": "real",
"SparseMatrix": "array",
"ImmutableSparseMatrix": "array",
"Matrix": "array",
"MutableDenseMatrix": "array",
"ImmutableMatrix": "array",
"ImmutableDenseMatrix": "array",
}
TENSORFLOW_TRANSLATIONS = {
"Abs": "abs",
"ceiling": "ceil",
"Max": "maximum",
"Min": "minimum",
"im": "imag",
"ln": "log",
"Mod": "mod",
"conjugate": "conj",
"re": "real",
}
NUMEXPR_TRANSLATIONS = {}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"tensorflow": (TENSORFLOW, TENSORFLOW_DEFAULT, TENSORFLOW_TRANSLATIONS, ("import_module('tensorflow')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload="False"):
"""
Creates a global translation dictionary for module.
The argument module has to be one of the following strings: "math",
"mpmath", "numpy", "sympy", "tensorflow".
These dictionaries map names of python functions to their equivalent in
other modules.
"""
from sympy.external import import_module
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
# For computing the modulus of a sympy expression we use the builtin abs
# function, instead of the previously used fabs function for all
# translation modules. This is because the fabs function in the math
# module does not accept complex valued arguments. (see issue 9474). The
# only exception, where we don't use the builtin abs function is the
# mpmath translation module, because mpmath.fabs returns mpf objects in
# contrast to abs().
if 'Abs' not in namespace:
namespace['Abs'] = abs
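# Minimal sketch of what a successful ``_import`` call leaves behind for the
# "math" backend: the MATH namespace is filled in place, the
# MATH_TRANSLATIONS aliases are added, and the ``Abs`` fallback is present.
#
#     _import('math')
#     MATH['ceiling'] is MATH['ceil']   # translated alias
#     'Abs' in MATH                     # builtin abs fallback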
@doctest_depends_on(modules=('numpy',))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=True):
"""
Returns a lambda function for fast calculation of numerical values.
Unless the user specifies otherwise, ``modules`` defaults to ``["numpy"]``
if NumPy is installed, and to ``["math", "mpmath", "sympy"]`` if it isn't.
That is, SymPy functions are replaced as far as possible by ``numpy``
functions when available, and otherwise by functions from Python's standard
library ``math`` or from ``mpmath``. To change this behavior, the
"modules" argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "numexpr", "sympy", "tensorflow"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted (and `args` is not a string). If you want
to view the lambdified function or provide "sympy" as the module, you
should probably set dummify=False.
For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
implemented_function and user defined subclasses of Function. If specified,
numexpr may be the only option in modules. The official list of numexpr
functions can be found at:
https://github.com/pydata/numexpr#supported-functions
In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
by default. As of release 1.0 ``numpy.array`` is the default.
To get the old default behavior you must pass in ``[{'ImmutableMatrix':
numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
matrix([[1],
[2]])
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import sin, tan, gamma
>>> from sympy.utilities.lambdify import lambdastr
>>> from sympy.abc import x, y
>>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>>> import numpy
>>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>>> f = lambdify((x,y), tan(x*y), "numpy")
>>> f(1, 2)
-2.18503986326
>>> from numpy import array
>>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function.:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import flatten
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
try:
_import("numpy")
except ImportError:
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
else:
modules = ["numpy"]
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
# Try to extract symbols from the expression.
# Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if _module_present('numpy',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumPyPrinter as printer
if _module_present('numexpr',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumExprPrinter as printer
if _module_present('tensorflow',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import TensorflowPrinter as printer
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
if flat in lstr:
namespace.update({flat: flatten})
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
func = eval(lstr, namespace)
# For numpy lambdify, wrap all input arguments in arrays.
# This is a fix for gh-11306.
if module_provided and _module_present('numpy',namespaces):
def array_wrap(funcarg):
def wrapper(*argsx, **kwargsx):
return funcarg(*[namespace['asarray'](i) for i in argsx], **kwargsx)
return wrapper
func = array_wrap(func)
# Apply the docstring
sig = "func({0})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = ("Created with lambdify. Signature:\n\n{sig}\n\n"
"Expression:\n\n{expr}").format(sig=sig, expr=expr_str)
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
#Sub in dummy variables for functions or symbols
if isinstance(args, (Function, Symbol)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector, NotIterable))
if isiter(args) and any(isiter(i) for i in args):
from sympy.utilities.iterables import flatten
import re
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
iter_args = ','.join([i if isiter(a) else i
for i, a in zip(dum_args, args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
','.join(dum_args), lstr, flat, iter_args)
if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
raise ValueError('the name %s is reserved by lambdastr' % flat)
return rv
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.codegen.register import build_file_aliases as register_codegen
from pants.backend.core.register import build_file_aliases as register_core
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.project_info.tasks.filedeps import FileDeps
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class FileDepsTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return register_core().merge(register_jvm()).merge(register_codegen())
@classmethod
def task_type(cls):
return FileDeps
def setUp(self):
super(FileDepsTest, self).setUp()
self.context(options={
'scala-platform': {
'runtime': ['tools:scala-library']
}
})
# TODO(John Sirois): Rationalize much of this target emission setup. Lots of tests do similar
# things: https://github.com/pantsbuild/pants/issues/525
def create_target(path, definition, sources=None):
if sources:
self.create_files(path, sources)
self.add_to_build_file(path, definition)
create_target(path='tools',
definition=dedent("""
jar_library(
name='scala-library',
jars=[
jar('org.scala-lang', 'scala-library', '2.11.2'),
]
)
"""))
create_target(path='src/scala/core',
definition=dedent("""
scala_library(
name='core',
sources=[
'core1.scala'
],
java_sources=[
'src/java/core'
]
)
"""),
sources=['core1.scala'])
create_target(path='src/java/core',
definition=dedent("""
java_library(
name='core',
sources=globs(
'core*.java',
),
dependencies=[
'src/scala/core'
]
)
"""),
sources=['core1.java', 'core2.java'])
create_target(path='src/resources/lib',
definition=dedent("""
resources(
name='lib',
sources=[
'data.json'
]
)
"""),
sources=['data.json'])
create_target(path='src/thrift/storage',
definition=dedent("""
java_thrift_library(
name='storage',
sources=[
'data_types.thrift'
]
)
"""),
sources=['src/thrift/storage/data_types.thrift'])
create_target(path='src/java/lib',
definition=dedent("""
java_library(
name='lib',
sources=[
'lib1.java'
],
dependencies=[
'src/scala/core',
'src/thrift/storage'
],
resources=[
'src/resources/lib'
]
)
"""),
sources=['lib1.java'])
# Derive a synthetic target from the src/thrift/storage thrift target as-if doing code-gen.
self.create_file('.pants.d/gen/thrift/java/storage/Angle.java')
self.make_target(spec='.pants.d/gen/thrift/java/storage',
target_type=JavaLibrary,
derived_from=self.target('src/thrift/storage'),
sources=['Angle.java'])
synthetic_java_lib = self.target('.pants.d/gen/thrift/java/storage')
java_lib = self.target('src/java/lib')
java_lib.inject_dependency(synthetic_java_lib.address)
create_target(path='src/java/bin',
definition=dedent("""
jvm_binary(
name='bin',
source='main.java',
main='bin.Main',
dependencies=[
'src/java/lib'
]
)
"""),
sources=['main.java'])
create_target(path='project',
definition=dedent("""
jvm_app(
name='app',
binary='src/java/bin',
bundles=[
bundle(fileset=['config/app.yaml'])
]
)
"""),
sources=['config/app.yaml'])
def test_resources(self):
self.assert_console_output(
'src/resources/lib/BUILD',
'src/resources/lib/data.json',
targets=[self.target('src/resources/lib')]
)
def test_globs(self):
self.assert_console_output(
'tools/BUILD',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core*.java',
targets=[self.target('src/scala/core')],
options=dict(globs=True),
)
def test_scala_java_cycle_scala_end(self):
self.assert_console_output(
'tools/BUILD',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('src/scala/core')]
)
def test_scala_java_cycle_java_end(self):
self.assert_console_output(
'tools/BUILD',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('src/java/core')]
)
def test_concrete_only(self):
self.assert_console_output(
'tools/BUILD',
'src/java/lib/BUILD',
'src/java/lib/lib1.java',
'src/thrift/storage/BUILD',
'src/thrift/storage/data_types.thrift',
'src/resources/lib/BUILD',
'src/resources/lib/data.json',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('src/java/lib')]
)
def test_jvm_app(self):
self.assert_console_output(
'tools/BUILD',
'project/BUILD',
'project/config/app.yaml',
'src/java/bin/BUILD',
'src/java/bin/main.java',
'src/java/lib/BUILD',
'src/java/lib/lib1.java',
'src/thrift/storage/BUILD',
'src/thrift/storage/data_types.thrift',
'src/resources/lib/BUILD',
'src/resources/lib/data.json',
'src/scala/core/BUILD',
'src/scala/core/core1.scala',
'src/java/core/BUILD',
'src/java/core/core1.java',
'src/java/core/core2.java',
targets=[self.target('project:app')]
)
def assert_console_output(self, *paths, **kwargs):
abs_paths = [os.path.join(self.build_root, path) for path in paths]
super(FileDepsTest, self).assert_console_output(*abs_paths, **kwargs)
|
|
# -*- coding: utf-8 -*-
"""
oauthlib.common
~~~~~~~~~~~~~~
This module provides data structures and utilities common
to all implementations of OAuth.
"""
from __future__ import absolute_import, unicode_literals
import collections
import datetime
import logging
import random
import re
import sys
import time
try:
from urllib import quote as _quote
from urllib import unquote as _unquote
from urllib import urlencode as _urlencode
except ImportError:
from urllib.parse import quote as _quote
from urllib.parse import unquote as _unquote
from urllib.parse import urlencode as _urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from oauth_classes import Grant
import SERVER_CONFIG as CONFIG
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789')
CLIENT_ID_CHARACTER_SET = (r' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMN'
'OPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}')
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
log = logging.getLogger('oauthlib')
PY3 = sys.version_info[0] == 3
if PY3:
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
# 'safe' must be bytes (Python 2.6 requires bytes, other versions allow either)
def quote(s, safe=b'/'):
s = s.encode('utf-8') if isinstance(s, unicode_type) else s
s = _quote(s, safe)
# PY3 always returns unicode. PY2 may return either, depending on whether
# it had to modify the string.
if isinstance(s, bytes_type):
s = s.decode('utf-8')
return s
def unquote(s):
s = _unquote(s)
# PY3 always returns unicode. PY2 seems to always return what you give it,
# which differs from quote's behavior. Just to be safe, make sure it is
# unicode before we return.
if isinstance(s, bytes_type):
s = s.decode('utf-8')
return s
def urlencode(params):
utf8_params = encode_params_utf8(params)
urlencoded = _urlencode(utf8_params)
if isinstance(urlencoded, unicode_type): # PY3 returns unicode
return urlencoded
else:
return urlencoded.decode("utf-8")
def encode_params_utf8(params):
"""Ensures that all parameters in a list of 2-element tuples are encoded to
bytestrings using UTF-8
"""
encoded = []
for k, v in params:
encoded.append((
k.encode('utf-8') if isinstance(k, unicode_type) else k,
v.encode('utf-8') if isinstance(v, unicode_type) else v))
return encoded
def decode_params_utf8(params):
"""Ensures that all parameters in a list of 2-element tuples are decoded to
unicode using UTF-8.
"""
decoded = []
for k, v in params:
decoded.append((
k.decode('utf-8') if isinstance(k, bytes_type) else k,
v.decode('utf-8') if isinstance(v, bytes_type) else v))
return decoded
urlencoded = set(always_safe) | set('=&;%+~,*@')
def urldecode(query):
"""Decode a query string in x-www-form-urlencoded format into a sequence
of two-element tuples.
    Unlike urlparse.parse_qsl(..., strict_parsing=True), urldecode will enforce
    correct formatting of the query string by validation. If validation fails,
    a ValueError will be raised. urlparse.parse_qsl will only raise errors if
    any of the name-value pairs omits the equals sign.
"""
# Check if query contains invalid characters
if query and not set(query) <= urlencoded:
error = ("Error trying to decode a non urlencoded string. "
"Found invalid characters: %s "
"in the string: '%s'. "
"Please ensure the request/response body is "
"x-www-form-urlencoded.")
raise ValueError(error % (set(query) - urlencoded, query))
# Check for correctly hex encoded values using a regular expression
# All encoded values begin with % followed by two hex characters
# correct = %00, %A0, %0A, %FF
# invalid = %G0, %5H, %PO
invalid_hex = '%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]'
if len(re.findall(invalid_hex, query)):
raise ValueError('Invalid hex encoding in query string.')
# We encode to utf-8 prior to parsing because parse_qsl behaves
# differently on unicode input in python 2 and 3.
# Python 2.7
# >>> urlparse.parse_qsl(u'%E5%95%A6%E5%95%A6')
# u'\xe5\x95\xa6\xe5\x95\xa6'
# Python 2.7, non unicode input gives the same
# >>> urlparse.parse_qsl('%E5%95%A6%E5%95%A6')
# '\xe5\x95\xa6\xe5\x95\xa6'
# but now we can decode it to unicode
# >>> urlparse.parse_qsl('%E5%95%A6%E5%95%A6').decode('utf-8')
# u'\u5566\u5566'
# Python 3.3 however
# >>> urllib.parse.parse_qsl(u'%E5%95%A6%E5%95%A6')
# u'\u5566\u5566'
query = query.encode(
'utf-8') if not PY3 and isinstance(query, unicode_type) else query
# We want to allow queries such as "c2" whereas urlparse.parse_qsl
# with the strict_parsing flag will not.
params = urlparse.parse_qsl(query, keep_blank_values=True)
# unicode all the things
return decode_params_utf8(params)
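# Illustrative calls (comments only, not part of the original module); reprs
# are shown Python-3 style, where every string is already unicode:
# >>> urldecode('grant_type=password&username=foo')
# [('grant_type', 'password'), ('username', 'foo')]
# >>> urldecode('c2')          # blank values are kept, so bare keys parse too
# [('c2', '')]
# >>> urldecode('foo bar')     # space is not a valid urlencoded character
# ValueError: Error trying to decode a non urlencoded string. ...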
def extract_params(raw):
"""Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None.
"""
if isinstance(raw, bytes_type) or isinstance(raw, unicode_type):
try:
params = urldecode(raw)
except ValueError:
params = None
elif hasattr(raw, '__iter__'):
try:
dict(raw)
except ValueError:
params = None
except TypeError:
params = None
else:
params = list(raw.items() if isinstance(raw, dict) else raw)
params = decode_params_utf8(params)
else:
params = None
return params
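# Illustrative behaviour (comments only): extract_params accepts the three
# formats named in its docstring and returns None for anything else:
# >>> extract_params('a=1&b=2')
# [('a', '1'), ('b', '2')]
# >>> extract_params({'a': '1'})
# [('a', '1')]
# >>> extract_params([('a', '1'), ('b', '2')])
# [('a', '1'), ('b', '2')]
# >>> extract_params(object()) is None
# True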
def generate_nonce():
"""Generate pseudorandom nonce that is unlikely to repeat.
Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
Per `section 3.2.1`_ of the MAC Access Authentication spec.
A random 64-bit number is appended to the epoch timestamp for both
randomness and to decrease the likelihood of collisions.
.. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
.. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
"""
return unicode_type(unicode_type(random.getrandbits(64)) + generate_timestamp())
def generate_timestamp():
"""Get seconds since epoch (UTC).
Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
Per `section 3.2.1`_ of the MAC Access Authentication spec.
.. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
.. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
"""
return unicode_type(int(time.time()))
def generate_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET):
"""Generates a non-guessable OAuth token
OAuth (1 and 2) does not specify the format of tokens except that they
should be strings of random characters. Tokens should not be guessable
    and entropy when generating the random characters is important, which is
    why SystemRandom is used instead of the default random.choice method.
"""
rand = random.SystemRandom()
return ''.join(rand.choice(chars) for x in range(length))
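# Illustrative only: the default token is 30 characters long and every
# character is drawn from UNICODE_ASCII_CHARACTER_SET above.
# >>> len(generate_token())
# 30
# >>> all(c in UNICODE_ASCII_CHARACTER_SET for c in generate_token(length=64))
# True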
def OK_generate_token(req):
username = req.body['username']
password = req.body['password']
client_id = req.body['client_id']
redirect_uri = req.body['redirect_uri']
g = Grant(username, password, client_id, redirect_uri)
code = g.encrypt_to_string(CONFIG.secret)
return code
def generate_signed_token(private_pem, request):
import Crypto.PublicKey.RSA as RSA
import jwt
private_key = RSA.importKey(private_pem)
now = datetime.datetime.utcnow()
claims = {
'scope': request.scope,
'exp': now + datetime.timedelta(seconds=request.expires_in)
}
claims.update(request.claims)
token = jwt.encode(claims, private_key, 'RS256')
token = to_unicode(token, "UTF-8")
return token
def verify_signed_token(private_pem, token):
import Crypto.PublicKey.RSA as RSA
import jwt
public_key = RSA.importKey(private_pem).publickey()
try:
# return jwt.verify_jwt(token.encode(), public_key)
return jwt.decode(token, public_key)
    except Exception:
        # Preserve the original decode/verification error for the caller
        # instead of swallowing it behind a bare Exception.
        raise
def generate_client_id(length=30, chars=CLIENT_ID_CHARACTER_SET):
"""Generates an OAuth client_id
    OAuth 2 specifies the format of client_id in
http://tools.ietf.org/html/rfc6749#appendix-A.
"""
return generate_token(length, chars)
def add_params_to_qs(query, params):
"""Extend a query with a list of two-tuples."""
if isinstance(params, dict):
params = params.items()
queryparams = urlparse.parse_qsl(query, keep_blank_values=True)
queryparams.extend(params)
return urlencode(queryparams)
def add_params_to_uri(uri, params, fragment=False):
"""Add a list of two-tuples to the uri query components."""
sch, net, path, par, query, fra = urlparse.urlparse(uri)
if fragment:
fra = add_params_to_qs(fra, params)
else:
query = add_params_to_qs(query, params)
return urlparse.urlunparse((sch, net, path, par, query, fra))
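# Illustrative only (comments, not executed); the URL is hypothetical:
# >>> add_params_to_uri('https://example.com/cb?state=xyz', [('code', 'abc')])
# 'https://example.com/cb?state=xyz&code=abc'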
def safe_string_equals(a, b):
""" Near-constant time string comparison.
Used in order to avoid timing attacks on sensitive information such
as secret keys during request verification (`rootLabs`_).
.. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/
"""
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
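# Illustrative only: every character is inspected even after a mismatch, so
# the comparison time does not reveal where the first difference occurs.
# >>> safe_string_equals('secret', 'secret')
# True
# >>> safe_string_equals('secret', 'Secret')
# False
# >>> safe_string_equals('secret', 'secrets')   # length mismatch short-circuits
# False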
def to_unicode(data, encoding='UTF-8'):
"""Convert a number of different types of objects to unicode."""
if isinstance(data, unicode_type):
return data
if isinstance(data, bytes_type):
return unicode_type(data, encoding=encoding)
if hasattr(data, '__iter__'):
try:
dict(data)
except TypeError:
pass
except ValueError:
# Assume it's a one dimensional data structure
return (to_unicode(i, encoding) for i in data)
else:
# We support 2.6 which lacks dict comprehensions
if hasattr(data, 'items'):
data = data.items()
return dict(((to_unicode(k, encoding), to_unicode(v, encoding)) for k, v in data))
return data
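# Illustrative only (Python-3 style reprs): bytes and nested containers are
# decoded recursively using the given encoding.
# >>> to_unicode(b'client_id')
# 'client_id'
# >>> to_unicode({b'grant_type': b'password'})
# {'grant_type': 'password'}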
class CaseInsensitiveDict(dict):
"""Basic case insensitive dict with strings only keys."""
proxy = {}
def __init__(self, data):
self.proxy = dict((k.lower(), k) for k in data)
for k in data:
self[k] = data[k]
def __contains__(self, k):
return k.lower() in self.proxy
def __delitem__(self, k):
key = self.proxy[k.lower()]
super(CaseInsensitiveDict, self).__delitem__(key)
del self.proxy[k.lower()]
def __getitem__(self, k):
key = self.proxy[k.lower()]
return super(CaseInsensitiveDict, self).__getitem__(key)
def get(self, k, default=None):
return self[k] if k in self else default
def __setitem__(self, k, v):
super(CaseInsensitiveDict, self).__setitem__(k, v)
self.proxy[k.lower()] = k
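    # Illustrative only: lookups ignore key case while the stored keys keep
    # their original casing.
    # >>> headers = CaseInsensitiveDict({'Content-Type': 'application/json'})
    # >>> headers['content-type']
    # 'application/json'
    # >>> 'CONTENT-TYPE' in headers
    # True
    # >>> headers.get('X-Missing', 'default')
    # 'default'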
class Request(object):
"""A malleable representation of a signable HTTP request.
Body argument may contain any data, but parameters will only be decoded if
they are one of:
* urlencoded query string
* dict
* list of 2-tuples
Anything else will be treated as raw body data to be passed through
unmolested.
"""
def __init__(self, uri, http_method='GET', body=None, headers=None,
encoding='utf-8'):
# Convert to unicode using encoding if given, else assume unicode
encode = lambda x: to_unicode(x, encoding) if encoding else x
self.uri = encode(uri)
self.http_method = encode(http_method)
self.headers = CaseInsensitiveDict(encode(headers or {}))
self.body = encode(body)
self.decoded_body = extract_params(encode(body))
self.oauth_params = []
self._params = {}
self._params.update(dict(urldecode(self.uri_query)))
self._params.update(dict(self.decoded_body or []))
self._params.update(self.headers)
def __getattr__(self, name):
return self._params.get(name, None)
def __repr__(self):
return '<oauthlib.Request url="%s", http_method="%s", headers="%s", body="%s">' % (
self.uri, self.http_method, self.headers, self.body)
@property
def uri_query(self):
return urlparse.urlparse(self.uri).query
@property
def uri_query_params(self):
if not self.uri_query:
return []
return urlparse.parse_qsl(self.uri_query, keep_blank_values=True,
strict_parsing=True)
@property
def duplicate_params(self):
seen_keys = collections.defaultdict(int)
all_keys = (p[0]
for p in (self.decoded_body or []) + self.uri_query_params)
for k in all_keys:
seen_keys[k] += 1
return [k for k, c in seen_keys.items() if c > 1]
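    # Illustrative only (comments, not executed): a Request merges query,
    # body and header parameters and flags names appearing more than once;
    # the URL below is hypothetical.
    # >>> r = Request('https://example.com/token?a=1',
    # ...             http_method='POST', body='b=2&b=3')
    # >>> r.a, r.b                  # attribute access goes through __getattr__
    # ('1', '3')
    # >>> r.duplicate_params
    # ['b']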
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import pipes
import shutil
import subprocess
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
import gyp
# Use MSVS2013 as the default toolchain.
CURRENT_DEFAULT_TOOLCHAIN_VERSION = '2013'
def SetEnvironmentAndGetRuntimeDllDirs():
"""Sets up os.environ to use the depot_tools VS toolchain with gyp, and
returns the location of the VS runtime DLLs so they can be copied into
the output directory after gyp generation.
"""
vs_runtime_dll_dirs = None
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
# When running on a non-Windows host, only do this if the SDK has explicitly
# been downloaded before (in which case json_data_file will exist).
if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
and depot_tools_win_toolchain):
if ShouldUpdateToolchain():
Update()
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
toolchain = toolchain_data['path']
version = toolchain_data['version']
win_sdk = toolchain_data.get('win_sdk')
if not win_sdk:
win_sdk = toolchain_data['win8sdk']
wdk = toolchain_data['wdk']
# TODO(scottmg): The order unfortunately matters in these. They should be
# split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
# below). http://crbug.com/345992
vs_runtime_dll_dirs = toolchain_data['runtime_dirs']
os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
os.environ['GYP_MSVS_VERSION'] = version
# We need to make sure windows_sdk_path is set to the automated
# toolchain values in GYP_DEFINES, but don't want to override any
    # other values there.
gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
gyp_defines_dict['windows_sdk_path'] = win_sdk
os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
for k, v in gyp_defines_dict.iteritems())
os.environ['WINDOWSSDKDIR'] = win_sdk
os.environ['WDK_DIR'] = wdk
# Include the VS runtime in the PATH in case it's not machine-installed.
runtime_path = ';'.join(vs_runtime_dll_dirs)
os.environ['PATH'] = runtime_path + ';' + os.environ['PATH']
elif sys.platform == 'win32' and not depot_tools_win_toolchain:
if not 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
os.environ['GYP_MSVS_OVERRIDE_PATH'] = DetectVisualStudioPath()
if not 'GYP_MSVS_VERSION' in os.environ:
os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
return vs_runtime_dll_dirs
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
raise Exception('The python library _winreg not found.')
def GetVisualStudioVersion():
"""Return GYP_MSVS_VERSION of Visual Studio.
"""
return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION)
def DetectVisualStudioPath():
"""Return path to the GYP_MSVS_VERSION of Visual Studio.
"""
# Note that this code is used from
# build/toolchain/win/setup_toolchain.py as well.
version_as_year = GetVisualStudioVersion()
year_to_version = {
'2013': '12.0',
'2015': '14.0',
}
if version_as_year not in year_to_version:
raise Exception(('Visual Studio version %s (from GYP_MSVS_VERSION)'
' not supported. Supported versions are: %s') % (
version_as_year, ', '.join(year_to_version.keys())))
version = year_to_version[version_as_year]
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version]
for key in keys:
path = _RegistryGetValue(key, 'InstallDir')
if not path:
continue
path = os.path.normpath(os.path.join(path, '..', '..'))
return path
raise Exception(('Visual Studio Version %s (from GYP_MSVS_VERSION)'
' not found.') % (version_as_year))
def _VersionNumber():
"""Gets the standard version number ('120', '140', etc.) based on
GYP_MSVS_VERSION."""
vs_version = GetVisualStudioVersion()
if vs_version == '2013':
return '120'
elif vs_version == '2015':
return '140'
else:
raise ValueError('Unexpected GYP_MSVS_VERSION')
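# A minimal sketch (comments only, not part of the original script) of how the
# helpers above fit together; values are illustrative:
#
#   GetVisualStudioVersion()  ->  '2013' or '2015' (from GYP_MSVS_VERSION)
#   _VersionNumber()          ->  '120' for 2013, '140' for 2015
#   runtime DLL names are then built from that number, e.g. 'msvcp140.dll'.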
def _CopyRuntimeImpl(target, source):
"""Copy |source| to |target| if it doesn't already exist or if it
needs to be updated.
"""
if (os.path.isdir(os.path.dirname(target)) and
(not os.path.isfile(target) or
os.stat(target).st_mtime != os.stat(source).st_mtime)):
print 'Copying %s to %s...' % (source, target)
if os.path.exists(target):
os.unlink(target)
shutil.copy2(source, target)
def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
"""Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('p', 'r'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
def _CopyRuntime2015(target_dir, source_dir, dll_pattern):
"""Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('msvcp', 'vccorlib', 'vcruntime'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
def _CopyRuntime(target_dir, source_dir, target_cpu, debug):
"""Copy the VS runtime DLLs, only if the target doesn't exist, but the target
directory does exist. Handles VS 2013 and VS 2015."""
suffix = "d.dll" if debug else ".dll"
if GetVisualStudioVersion() == '2015':
_CopyRuntime2015(target_dir, source_dir, '%s140' + suffix)
if debug:
_CopyRuntimeImpl(os.path.join(target_dir, 'ucrtbased.dll'),
os.path.join(source_dir, 'ucrtbased.dll'))
else:
_CopyRuntime2013(target_dir, source_dir, 'msvc%s120' + suffix)
# Copy the PGO runtime library to the release directories.
if not debug and os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
'VC', 'bin')
pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
if target_cpu == "x86":
source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x86):
_CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll), source_x86)
elif target_cpu == "x64":
source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x64):
_CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll),
source_x64)
else:
raise NotImplementedError("Unexpected target_cpu value:" + target_cpu)
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
"""Copies the VS runtime DLLs from the given |runtime_dirs| to the output
directory so that even if not system-installed, built binaries are likely to
be able to run.
This needs to be run after gyp has been run so that the expected target
output directories are already created.
This is used for the GYP build and gclient runhooks.
"""
x86, x64 = runtime_dirs
out_debug = os.path.join(output_dir, 'Debug')
out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
out_release = os.path.join(output_dir, 'Release')
out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
out_release_x64 = os.path.join(output_dir, 'Release_x64')
if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
os.makedirs(out_debug_nacl64)
if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
os.makedirs(out_release_nacl64)
_CopyRuntime(out_debug, x86, "x86", debug=True)
_CopyRuntime(out_release, x86, "x86", debug=False)
_CopyRuntime(out_debug_x64, x64, "x64", debug=True)
_CopyRuntime(out_release_x64, x64, "x64", debug=False)
_CopyRuntime(out_debug_nacl64, x64, "x64", debug=True)
_CopyRuntime(out_release_nacl64, x64, "x64", debug=False)
def CopyDlls(target_dir, configuration, target_cpu):
"""Copy the VS runtime DLLs into the requested directory as needed.
configuration is one of 'Debug' or 'Release'.
target_cpu is one of 'x86' or 'x64'.
The debug configuration gets both the debug and release DLLs; the
release config only the latter.
This is used for the GN build.
"""
vs_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
if not vs_runtime_dll_dirs:
return
x64_runtime, x86_runtime = vs_runtime_dll_dirs
runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
_CopyRuntime(target_dir, runtime_dir, target_cpu, debug=False)
if configuration == 'Debug':
_CopyRuntime(target_dir, runtime_dir, target_cpu, debug=True)
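# Illustrative invocation (comment only), matching the 'copy_dlls' command
# dispatched in main() below:
#
#   python vs_toolchain.py copy_dlls <target_dir> Debug x64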
def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
if GetVisualStudioVersion() == '2015':
# Update 1 with Debuggers, UCRT installers and ucrtbased.dll
return ['524956ec6e64e68fead3773e3ce318537657b404']
else:
# Default to VS2013.
return ['9ff97c632ae1fee0c98bcd53e71770eb3a0d8deb']
def ShouldUpdateToolchain():
"""Check if the toolchain should be upgraded."""
if not os.path.exists(json_data_file):
return True
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
version = toolchain_data['version']
env_version = GetVisualStudioVersion()
# If there's a mismatch between the version set in the environment and the one
# in the json file then the toolchain should be updated.
return version != env_version
def Update(force=False):
"""Requests an update of the toolchain to the specific hashes we have at
this revision. The update outputs a .json of the various configuration
information required to pass to gyp which we use in |GetToolchainDir()|.
"""
if force != False and force != '--force':
print >>sys.stderr, 'Unknown parameter "%s"' % force
return 1
if force == '--force' or os.path.exists(json_data_file):
force = True
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
if ((sys.platform in ('win32', 'cygwin') or force) and
depot_tools_win_toolchain):
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
get_toolchain_args = [
sys.executable,
os.path.join(depot_tools_path,
'win_toolchain',
'get_toolchain_if_necessary.py'),
'--output-json', json_data_file,
] + _GetDesiredVsToolchainHashes()
if force:
get_toolchain_args.append('--force')
subprocess.check_call(get_toolchain_args)
return 0
def GetToolchainDir():
"""Gets location information about the current toolchain (must have been
previously updated by 'update'). This is used for the GN build."""
runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
# If WINDOWSSDKDIR is not set, search the default SDK path and set it.
if not 'WINDOWSSDKDIR' in os.environ:
default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\8.1'
if os.path.isdir(default_sdk_path):
os.environ['WINDOWSSDKDIR'] = default_sdk_path
print '''vs_path = "%s"
sdk_path = "%s"
vs_version = "%s"
wdk_dir = "%s"
runtime_dirs = "%s"
''' % (
os.environ['GYP_MSVS_OVERRIDE_PATH'],
os.environ['WINDOWSSDKDIR'],
GetVisualStudioVersion(),
os.environ.get('WDK_DIR', ''),
';'.join(runtime_dll_dirs or ['None']))
def main():
commands = {
'update': Update,
'get_toolchain_dir': GetToolchainDir,
'copy_dlls': CopyDlls,
}
if len(sys.argv) < 2 or sys.argv[1] not in commands:
print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
return 1
return commands[sys.argv[1]](*sys.argv[2:])
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc
from neutron.api.v2 import attributes as attr
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup as ext_sg
from neutron.tests import base
from neutron.tests.unit import test_db_plugin
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_security_group.'
'SecurityGroupTestPlugin')
class SecurityGroupTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attr.RESOURCE_ATTRIBUTE_MAP.update(
ext_sg.RESOURCE_ATTRIBUTE_MAP)
return ext_sg.Securitygroup.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class SecurityGroupsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def _create_security_group(self, fmt, name, description, **kwargs):
data = {'security_group': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test_tenant'),
'description': description}}
security_group_req = self.new_create_request('security-groups', data,
fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_req.get_response(self.ext_api)
def _build_security_group_rule(self, security_group_id, direction, proto,
port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
tenant_id='test_tenant',
ethertype=const.IPv4):
        data = {'security_group_rule': {'security_group_id': security_group_id,
                                        'direction': direction,
                                        'protocol': proto,
                                        'ethertype': ethertype,
                                        'tenant_id': tenant_id}}
if port_range_min:
data['security_group_rule']['port_range_min'] = port_range_min
if port_range_max:
data['security_group_rule']['port_range_max'] = port_range_max
if remote_ip_prefix:
data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
if remote_group_id:
data['security_group_rule']['remote_group_id'] = remote_group_id
return data
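    # Illustrative only: for a TCP port-22 ingress rule the helper above
    # produces a request body shaped like the following (UUID hypothetical):
    #
    #   {'security_group_rule': {'security_group_id': '<sg-uuid>',
    #                            'direction': 'ingress',
    #                            'protocol': 'tcp',
    #                            'ethertype': 'IPv4',
    #                            'tenant_id': 'test_tenant',
    #                            'port_range_min': '22',
    #                            'port_range_max': '22'}}
    #
    # which the tests below hand to _create_security_group_rule().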
def _create_security_group_rule(self, fmt, rules, **kwargs):
security_group_rule_req = self.new_create_request(
'security-group-rules', rules, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_rule_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, **kwargs):
res = self._create_security_group(fmt, name, description, **kwargs)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_security_group_rule(self, fmt, rules, **kwargs):
res = self._create_security_group_rule(self.fmt, rules)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def security_group(self, name='webservers', description='webservers',
fmt=None, no_delete=False):
if not fmt:
fmt = self.fmt
security_group = self._make_security_group(fmt, name, description)
yield security_group
if not no_delete:
self._delete('security-groups',
security_group['security_group']['id'])
@contextlib.contextmanager
def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
'd1db38eb087',
direction='ingress', protocol=const.PROTO_NAME_TCP,
port_range_min='22', port_range_max='22',
remote_ip_prefix=None, remote_group_id=None,
fmt=None, no_delete=False, ethertype=const.IPv4):
if not fmt:
fmt = self.fmt
rule = self._build_security_group_rule(security_group_id,
direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype=ethertype)
security_group_rule = self._make_security_group_rule(self.fmt, rule)
yield security_group_rule
if not no_delete:
self._delete('security-group-rules',
security_group_rule['security_group_rule']['id'])
def _delete_default_security_group_egress_rules(self, security_group_id):
"""Deletes default egress rules given a security group ID."""
res = self._list(
'security-group-rules',
query_params='security_group_id=%s' % security_group_id)
for r in res['security_group_rules']:
if (r['direction'] == 'egress' and not r['port_range_max'] and
not r['port_range_min'] and not r['protocol']
and not r['remote_ip_prefix']):
self._delete('security-group-rules', r['id'])
def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
"""Asserts that the sg rule has expected key/value pairs passed
in as expected_kvs dictionary
"""
for k, v in expected_kvs.iteritems():
self.assertEqual(security_group_rule[k], v)
class SecurityGroupsTestCaseXML(SecurityGroupsTestCase):
fmt = 'xml'
class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
securitygroups_db.SecurityGroupDbMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with security groups.
"""
__native_pagination_support = True
__native_sorting_support = True
supported_extension_aliases = ["security-group"]
def create_port(self, context, port):
tenant_id = self._get_tenant_id_for_create(context, port['port'])
default_sg = self._ensure_default_security_group(context, tenant_id)
if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
session = context.session
with session.begin(subtransactions=True):
sgids = self._get_security_groups_on_port(context, port)
port = super(SecurityGroupTestPlugin, self).create_port(context,
port)
self._process_port_create_security_group(context, port,
sgids)
return port
def update_port(self, context, id, port):
session = context.session
with session.begin(subtransactions=True):
if ext_sg.SECURITYGROUPS in port['port']:
port['port'][ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
# delete the port binding and read it with the new rules
self._delete_port_security_group_bindings(context, id)
port['port']['id'] = id
self._process_port_create_security_group(
context, port['port'],
port['port'].get(ext_sg.SECURITYGROUPS))
port = super(SecurityGroupTestPlugin, self).update_port(
context, id, port)
return port
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(context, network['network'])
self._ensure_default_security_group(context, tenant_id)
return super(SecurityGroupTestPlugin, self).create_network(context,
network)
def get_ports(self, context, filters=None, fields=None,
sorts=[], limit=None, marker=None,
page_reverse=False):
neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
context, filters, sorts=sorts, limit=limit, marker=marker,
page_reverse=page_reverse)
return neutron_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
def setUp(self, plugin=None, ext_mgr=None):
plugin = plugin or DB_PLUGIN_KLASS
ext_mgr = ext_mgr or SecurityGroupTestExtensionManager()
super(SecurityGroupDBTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
# Verify that default egress rules have been created
sg_rules = security_group['security_group']['security_group_rules']
self.assertEqual(len(sg_rules), 2)
v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
self.assertEqual(len(v4_rules), 1)
v4_rule = v4_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_rule, expected)
v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
self.assertEqual(len(v6_rules), 1)
v6_rule = v6_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_rule, expected)
def test_update_security_group(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['name'],
data['security_group']['name'])
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_update_security_group_name_to_default_fail(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'default',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_update_default_security_group_name_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_default_security_group_with_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_list_security_groups(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as security_groups:
self._test_list_resources('security-group',
security_groups,
query_params='description=sg')
def test_list_security_groups_with_sort(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_sort('security-group',
(sg3, sg2, sg1),
[('name', 'desc')],
query_params='description=sg')
def test_list_security_groups_with_pagination(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_pagination('security-group',
(sg1, sg2, sg3),
('name', 'asc'), 2, 2,
query_params='description=sg')
def test_list_security_groups_with_pagination_reverse(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_pagination_reverse(
'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
query_params='description=sg')
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
'22', None, None, ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_invalid_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
remote_ip_prefix = bad_prefix
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
name = 'webservers'
description = 'my webservers'
        # Use a list of (prefix, ethertype) pairs: a dict would silently drop
        # the duplicate prefixes and skip half of the invalid combinations.
        test_addr = [('192.168.1.1/24', 'ipv4'), ('192.168.1.1/24', 'IPv6'),
                     ('2001:db8:1234::/48', 'ipv6'),
                     ('2001:db8:1234::/48', 'IPv4')]
        for prefix, ether in test_addr:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
ethertype = ether
remote_ip_prefix = prefix
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
None,
ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_unmasked_prefix(self):
name = 'webservers'
description = 'my webservers'
addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}}
for ip in addr:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
ethertype = addr[ip]['ethertype']
remote_ip_prefix = ip
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
None,
ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, 201)
res_sg = self.deserialize(self.fmt, res)
prefix = res_sg['security_group_rule']['remote_ip_prefix']
self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))
def test_create_security_group_rule_tcp_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = const.PROTO_NUM_TCP # TCP
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_case_insensitive(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'TCP'
port_range_min = 22
port_range_max = 22
ethertype = 'ipV4'
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
ethertype=ethertype) as rule:
                # the lower case value will be returned
self.assertEqual(rule['security_group_rule']['protocol'],
protocol.lower())
self.assertEqual(rule['security_group_rule']['ethertype'],
const.IPv4)
def test_get_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
remote_group_id = sg['security_group']['id']
res = self.new_show_request('security-groups', remote_group_id)
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix):
group = self.deserialize(
self.fmt, res.get_response(self.ext_api))
sg_rule = group['security_group']['security_group_rules']
self.assertEqual(group['security_group']['id'],
remote_group_id)
self.assertEqual(len(sg_rule), 3)
sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
for k, v, in keys:
self.assertEqual(sg_rule[0][k], v)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description, no_delete=True) as sg:
remote_group_id = sg['security_group']['id']
self._delete('security-groups', remote_group_id,
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_admin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
neutron_context = context.Context('', 'test-tenant')
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
neutron_context=neutron_context)
def test_security_group_list_creates_default_security_group(self):
neutron_context = context.Context('', 'test-tenant')
sg = self._list('security-groups',
neutron_context=neutron_context).get('security_groups')
self.assertEqual(len(sg), 1)
def test_default_security_group_rules(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
security_group_id = groups['security_groups'][0]['id']
res = self.new_list_request('security-group-rules')
rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(rules['security_group_rules']), 4)
# Verify default rule for v4 egress
sg_rules = rules['security_group_rules']
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_egress, expected)
# Verify default rule for v6 egress
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_egress, expected)
# Verify default rule for v4 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv4,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_ingress, expected)
# Verify default rule for v6 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv6,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_ingress, expected)
def test_create_security_group_rule_remote_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description) as sg2:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_group_id = sg2['security_group']['id']
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_group_id', remote_group_id),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id
) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_and_code(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# port_range_min (ICMP type) is greater than port_range_max
# (ICMP code) in order to confirm min <= max port check is
# not called for ICMP.
port_range_min = 8
port_range_max = 5
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# ICMP type
port_range_min = 8
# ICMP code
port_range_max = None
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_source_group_ip_and_ip_prefix(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_bad_security_group_id(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant(self):
with self.security_group() as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': "bad_tenant"}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant_remote_group_id(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
sg2 = self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg2['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant',
'remote_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant_security_group_rule(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant'}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_remote_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_duplicate_rules(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22')
self._create_security_group_rule(self.fmt, rule)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_min_port_greater_max(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]:
rule = self._build_security_group_rule(
sg['security_group']['id'],
'ingress', protocol, '50', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_ports_but_no_protocol(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', None, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_port_range_min_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', None)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_port_range_max_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, None, '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_icmp_type_too_big(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, '256', None)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_icmp_code_too_big(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, '8', '256')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_icmp_with_code_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_ICMP, None, '2')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_list_ports_security_group(self):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'])
req = self.new_list_request('ports')
res = req.get_response(self.api)
ports = self.deserialize(self.fmt, res)
port = ports['ports'][0]
self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['id'])
def test_list_security_group_rules(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with contextlib.nested(self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24)
) as (sgr1, sgr2, sgr3):
# Delete default rules as they would fail the following
# assertion at the end.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_resources('security-group-rule',
[sgr1, sgr2, sgr3],
query_params=q)
def test_list_security_group_rules_with_sort(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with contextlib.nested(self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24)
) as (sgr1, sgr2, sgr3):
# Delete default rules as they would fail the following
# assertion at the end.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_with_sort('security-group-rule',
(sgr3, sgr2, sgr1),
[('port_range_max', 'desc')],
query_params=q)
def test_list_security_group_rules_with_pagination(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with contextlib.nested(self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24)
) as (sgr1, sgr2, sgr3):
# Delete default rules as they would fail the following
# assertion at the end.
self._delete_default_security_group_egress_rules(
security_group_id)
q = 'direction=egress&security_group_id=' + security_group_id
self._test_list_with_pagination(
'security-group-rule', (sgr3, sgr2, sgr1),
('port_range_max', 'desc'), 2, 2,
query_params=q)
def test_list_security_group_rules_with_pagination_reverse(self):
with self.security_group(name='sg') as sg:
security_group_id = sg['security_group']['id']
with contextlib.nested(self.security_group_rule(security_group_id,
direction='egress',
port_range_min=22,
port_range_max=22),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=23,
port_range_max=23),
self.security_group_rule(security_group_id,
direction='egress',
port_range_min=24,
port_range_max=24)
) as (sgr1, sgr2, sgr3):
self._test_list_with_pagination_reverse(
'security-group-rule', (sgr3, sgr2, sgr1),
('port_range_max', 'desc'), 2, 2,
query_params='direction=egress')
def test_update_port_with_security_group(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[sg['security_group']['id']]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# Test update port without security group
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name']}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
self._delete('ports', port['port']['id'])
def test_update_port_with_multiple_security_groups(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg1:
with self.security_group() as sg2:
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1['security_group']['id'],
sg2['security_group']['id']])
port = self.deserialize(self.fmt, res)
self.assertEqual(len(
port['port'][ext_sg.SECURITYGROUPS]), 2)
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_empty_list(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': []}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
[])
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_none(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': None}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
[])
self._delete('ports', port['port']['id'])
def test_create_port_with_bad_security_group(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['bad_id'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_delete_security_group_port_in_use(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# try to delete security group that's in use
res = self._delete('security-groups',
sg['security_group']['id'],
webob.exc.HTTPConflict.code)
# delete the blocking port
self._delete('ports', port['port']['id'])
def test_create_security_group_rule_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule1 = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '23',
'23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule1 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_allow_all_ipv4(self):
with self.security_group() as sg:
rule = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': 'IPv4',
'tenant_id': 'test_tenant'}
res = self._create_security_group_rule(
self.fmt, {'security_group_rule': rule})
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule_v4 = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': 'IPv4',
'tenant_id': 'test_tenant'}
rule_v6 = {'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'ethertype': 'IPv6',
'tenant_id': 'test_tenant'}
rules = {'security_group_rules': [rule_v4, rule_v6]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_duplicate_rule_in_post(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_db(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress',
const.PROTO_NAME_TCP, '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_different_security_group_ids(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg1:
with self.security_group() as sg2:
rule1 = self._build_security_group_rule(
sg1['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg2['security_group']['id'], 'ingress',
const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_invalid_ethertype(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id,
ethertype='IPv5')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_invalid_protocol(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'tcp/ip'
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_port_with_non_uuid(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['not_valid'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
class TestConvertIPPrefixToCIDR(base.BaseTestCase):
def test_convert_bad_ip_prefix_to_cidr(self):
for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
self.assertRaises(n_exc.InvalidCIDR,
ext_sg.convert_ip_prefix_to_cidr, val)
self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))
def test_convert_ip_prefix_no_netmask_to_cidr(self):
addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
for k, v in addr.iteritems():
self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
'%s/%s' % (k, v))
def test_convert_ip_prefix_with_netmask_to_cidr(self):
addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
for addr in addresses:
self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(addr), addr)
class TestSecurityGroupsXML(TestSecurityGroups):
fmt = 'xml'
|
|
from __future__ import absolute_import
import os
import re
import six
import time
import logging
import posixpath
from sentry.models import Project, EventError
from sentry.plugins import Plugin2
from sentry.lang.native.symbolizer import Symbolizer
from sentry.lang.native.utils import find_all_stacktraces, \
find_apple_crash_report_referenced_images, get_sdk_from_event, \
find_stacktrace_referenced_images, get_sdk_from_apple_system_info, \
APPLE_SDK_MAPPING
from sentry.utils.native import parse_addr
logger = logging.getLogger(__name__)
model_re = re.compile(r'^(\S+?)\d')
APP_BUNDLE_PATHS = (
'/var/containers/Bundle/Application/',
'/private/var/containers/Bundle/Application/',
)
SIM_PATH = '/Developer/CoreSimulator/Devices/'
SIM_APP_PATH = '/Containers/Bundle/Application/'
NON_APP_FRAMEWORKS = (
'/Frameworks/libswiftCore.dylib',
)
SIGNAL_NAMES = {
1: 'SIGHUP',
2: 'SIGINT',
3: 'SIGQUIT',
4: 'SIGILL',
5: 'SIGTRAP',
6: 'SIGABRT',
7: 'SIGEMT',
8: 'SIGFPE',
9: 'SIGKILL',
10: 'SIGBUS',
11: 'SIGSEGV',
12: 'SIGSYS',
13: 'SIGPIPE',
14: 'SIGALRM',
15: 'SIGTERM',
16: 'SIGURG',
17: 'SIGSTOP',
18: 'SIGTSTP',
19: 'SIGCONT',
20: 'SIGCHLD',
21: 'SIGTTIN',
22: 'SIGTTOU',
24: 'SIGXCPU',
25: 'SIGXFSZ',
26: 'SIGVTALRM',
27: 'SIGPROF',
28: 'SIGWINCH',
29: 'SIGINFO',
31: 'SIGUSR2',
}
def append_error(data, err):
data.setdefault('errors', []).append(err)
def process_posix_signal(data):
signal = data.get('signal', -1)
signal_name = data.get('name')
if signal_name is None:
signal_name = SIGNAL_NAMES.get(signal)
return {
'signal': signal,
'name': signal_name,
'code': data.get('code'),
'code_name': data.get('code_name'),
}
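# A minimal usage sketch (illustrative only, never called by the plugin itself): when a
# crash payload does not carry an explicit signal name, it is looked up in SIGNAL_NAMES.
def _example_process_posix_signal():
    info = process_posix_signal({'signal': 6, 'code': 0})
    assert info['signal'] == 6
    assert info['name'] == 'SIGABRT'
    assert info['code_name'] is None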
def exception_from_apple_error_or_diagnosis(error, diagnosis=None):
rv = {}
error = error or {}
mechanism = {}
if 'mach' in error:
mechanism['mach_exception'] = error['mach']
if 'signal' in error:
mechanism['posix_signal'] = process_posix_signal(error['signal'])
if mechanism:
mechanism.setdefault('type', 'cocoa')
rv['mechanism'] = mechanism
# Start by getting the error from nsexception
if error:
nsexception = error.get('nsexception')
if nsexception:
rv['type'] = nsexception['name']
if 'value' in nsexception:
rv['value'] = nsexception['value']
# If we don't have an error yet, try to build one from reason and
# diagnosis
if 'value' not in rv:
if 'reason' in error:
rv['value'] = error['reason']
elif 'diagnosis' in error:
rv['value'] = error['diagnosis']
elif 'mach_exception' in mechanism:
rv['value'] = mechanism['mach_exception'] \
.get('exception_name') or 'Mach Exception'
elif 'posix_signal' in mechanism:
rv['value'] = mechanism['posix_signal'] \
.get('name') or 'Posix Signal'
else:
rv['value'] = 'Unknown'
# Figure out a reasonable type
if 'type' not in rv:
if 'mach_exception' in mechanism:
rv['type'] = 'MachException'
elif 'posix_signal' in mechanism:
rv['type'] = 'Signal'
else:
rv['type'] = 'Unknown'
if rv:
return rv
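# Illustrative sketch of the fallback chain above, assuming a hypothetical error payload
# that carries only a POSIX signal and a human-readable reason.
def _example_exception_from_apple_error():
    exc = exception_from_apple_error_or_diagnosis(
        {'signal': {'signal': 11, 'code': 0}, 'reason': 'bad memory access'})
    # the reason becomes the value, and the mechanism drives the type
    assert exc['value'] == 'bad memory access'
    assert exc['type'] == 'Signal'
    assert exc['mechanism']['posix_signal']['name'] == 'SIGSEGV'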
def is_in_app(frame, app_uuid=None):
if app_uuid is not None:
frame_uuid = frame.get('uuid')
if frame_uuid == app_uuid:
return True
fn = frame.get('package') or ''
if not (fn.startswith(APP_BUNDLE_PATHS) or
(SIM_PATH in fn and SIM_APP_PATH in fn)):
return False
if fn.endswith(NON_APP_FRAMEWORKS):
return False
return True
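# Illustrative sketch with hypothetical paths: binaries inside the app bundle count as
# in-app, while a bundled Swift runtime dylib and system libraries do not.
def _example_is_in_app():
    app_binary = '/var/containers/Bundle/Application/ABCD/Foo.app/Foo'
    swift_dylib = ('/var/containers/Bundle/Application/ABCD/Foo.app'
                   '/Frameworks/libswiftCore.dylib')
    assert is_in_app({'package': app_binary})
    assert not is_in_app({'package': swift_dylib})
    assert not is_in_app({'package': '/usr/lib/system/libdyld.dylib'})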
def convert_stacktrace(frames, system=None, notable_addresses=None):
app_uuid = None
if system:
app_uuid = system.get('app_uuid')
if app_uuid is not None:
app_uuid = app_uuid.lower()
converted_frames = []
longest_addr = 0
for frame in reversed(frames):
fn = frame.get('filename')
# We only record the offset if we found a symbol but we did not
# find a line number. In that case it's the offset in bytes from
# the beginning of the symbol.
function = frame.get('symbol_name') or '<unknown>'
lineno = frame.get('line')
offset = None
if not lineno:
offset = frame['instruction_addr'] - frame['symbol_addr']
cframe = {
'abs_path': fn,
'filename': fn and posixpath.basename(fn) or None,
            # This can come back as `None` from the symbolizer, in which
            # case we need to fill something else in or we will later fail
            # to fulfill the interface requirements, which say that a
            # function needs to be provided.
'function': function,
'package': frame.get('object_name'),
'symbol_addr': '%x' % frame['symbol_addr'],
'instruction_addr': '%x' % frame['instruction_addr'],
'instruction_offset': offset,
'lineno': lineno,
}
cframe['in_app'] = is_in_app(cframe, app_uuid)
converted_frames.append(cframe)
longest_addr = max(longest_addr, len(cframe['symbol_addr']),
len(cframe['instruction_addr']))
    # Pad out addresses to be of the same length and add the '0x' prefix.
    # The addresses above were formatted without a prefix, so pad the raw
    # hex digits directly rather than stripping the first two characters.
    for frame in converted_frames:
        for key in 'symbol_addr', 'instruction_addr':
            frame[key] = '0x' + frame[key].rjust(longest_addr, '0')
if converted_frames and notable_addresses:
converted_frames[-1]['vars'] = notable_addresses
if converted_frames:
return {'frames': converted_frames}
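# A small sketch of convert_stacktrace with two hypothetical frames: the output order is
# reversed (innermost call last), missing symbols fall back to '<unknown>', and every
# address is rendered as a zero-padded hex string with a '0x' prefix.
def _example_convert_stacktrace():
    frames = [
        {'symbol_name': 'main', 'line': 12, 'filename': '/src/main.m',
         'object_name': '/var/containers/Bundle/Application/ABCD/Foo.app/Foo',
         'symbol_addr': 0x1000, 'instruction_addr': 0x1040},
        {'symbol_name': None, 'line': None, 'filename': None,
         'object_name': '/usr/lib/system/libdyld.dylib',
         'symbol_addr': 0x7fff5fc01000, 'instruction_addr': 0x7fff5fc01024},
    ]
    st = convert_stacktrace(frames)
    assert st['frames'][-1]['function'] == 'main'
    assert st['frames'][0]['function'] == '<unknown>'
    assert all(f['instruction_addr'].startswith('0x') for f in st['frames'])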
def inject_apple_backtrace(data, frames, diagnosis=None, error=None,
system=None, notable_addresses=None,
thread_id=None):
stacktrace = convert_stacktrace(frames, system, notable_addresses)
if error or diagnosis:
error = error or {}
exc = exception_from_apple_error_or_diagnosis(error, diagnosis)
if exc is not None:
exc['stacktrace'] = stacktrace
exc['thread_id'] = thread_id
data['sentry.interfaces.Exception'] = {'values': [exc]}
# Since we inject the exception late we need to make sure that
# we set the event type to error as it would be set to
# 'default' otherwise.
data['type'] = 'error'
return True
data['sentry.interfaces.Stacktrace'] = stacktrace
return False
def inject_apple_device_data(data, system):
contexts = data.setdefault('contexts', {})
device = contexts.setdefault('device', {})
os = contexts.setdefault('os', {})
try:
os['name'] = APPLE_SDK_MAPPING[system['system_name']]
except LookupError:
os['name'] = system.get('system_name') or 'Generic Apple'
if 'system_version' in system:
os['version'] = system['system_version']
if 'os_version' in system:
os['build'] = system['os_version']
if 'kernel_version' in system:
os['kernel_version'] = system['kernel_version']
if 'jailbroken' in system:
os['rooted'] = system['jailbroken']
if 'cpu_arch' in system:
device['arch'] = system['cpu_arch']
if 'model' in system:
device['model_id'] = system['model']
if 'machine' in system:
device['model'] = system['machine']
match = model_re.match(system['machine'])
if match is not None:
device['family'] = match.group(1)
def dump_crash_report(report):
import json
with open('/tmp/sentry-apple-crash-report-%s.json' % time.time(), 'w') as f:
json.dump(report, f, indent=2)
def preprocess_apple_crash_event(data):
"""This processes the "legacy" AppleCrashReport."""
crash_report = data.get('sentry.interfaces.AppleCrashReport')
if crash_report is None:
return
if os.environ.get('SENTRY_DUMP_APPLE_CRASH_REPORT') == '1':
dump_crash_report(crash_report)
project = Project.objects.get_from_cache(
id=data['project'],
)
system = None
errors = []
threads = []
crash = crash_report['crash']
crashed_thread = None
threads = {}
raw_threads = {}
for raw_thread in crash['threads']:
if raw_thread['crashed'] and raw_thread.get('backtrace'):
crashed_thread = raw_thread
raw_threads[raw_thread['index']] = raw_thread
threads[raw_thread['index']] = {
'id': raw_thread['index'],
'name': raw_thread.get('name'),
'current': raw_thread.get('current_thread', False),
'crashed': raw_thread.get('crashed', False),
}
sdk_info = get_sdk_from_apple_system_info(system)
referenced_images = find_apple_crash_report_referenced_images(
crash_report['binary_images'], raw_threads.values())
sym = Symbolizer(project, crash_report['binary_images'],
referenced_images=referenced_images)
with sym:
if crashed_thread is None:
append_error(data, {
'type': EventError.NATIVE_NO_CRASHED_THREAD,
})
else:
system = crash_report.get('system')
try:
bt, errors = sym.symbolize_backtrace(
crashed_thread['backtrace']['contents'], sdk_info)
for error in errors:
append_error(data, error)
if inject_apple_backtrace(data, bt, crash.get('diagnosis'),
crash.get('error'), system,
crashed_thread.get('notable_addresses'),
crashed_thread['index']):
# We recorded an exception, so in this case we can
# skip having the stacktrace.
threads[crashed_thread['index']]['stacktrace'] = None
except Exception:
logger.exception('Failed to symbolicate')
                append_error(data, {
                    'type': EventError.NATIVE_INTERNAL_FAILURE,
                    'error': 'The symbolicator encountered an internal failure',
                })
for thread in six.itervalues(threads):
# If we were told to skip the stacktrace, skip it indeed
if thread.get('stacktrace', Ellipsis) is None:
continue
raw_thread = raw_threads.get(thread['id'])
if raw_thread is None or not raw_thread.get('backtrace'):
continue
bt, errors = sym.symbolize_backtrace(
raw_thread['backtrace']['contents'], sdk_info)
for error in errors:
append_error(data, error)
thread['stacktrace'] = convert_stacktrace(
bt, system, raw_thread.get('notable_addresses'))
if threads:
data['threads'] = {
'values': sorted(threads.values(), key=lambda x: x['id']),
}
if system:
inject_apple_device_data(data, system)
return data
def resolve_frame_symbols(data):
debug_meta = data.get('debug_meta')
if not debug_meta:
return
debug_images = debug_meta['images']
sdk_info = get_sdk_from_event(data)
stacktraces = find_all_stacktraces(data)
if not stacktraces:
return
project = Project.objects.get_from_cache(
id=data['project'],
)
errors = []
referenced_images = find_stacktrace_referenced_images(
debug_images, stacktraces)
sym = Symbolizer(project, debug_images,
referenced_images=referenced_images)
frame = None
idx = -1
def report_error(e):
errors.append({
'type': EventError.NATIVE_INTERNAL_FAILURE,
'frame': frame,
'error': 'frame #%d: %s: %s' % (
idx,
e.__class__.__name__,
six.text_type(e),
)
})
longest_addr = 0
processed_frames = []
with sym:
for stacktrace in stacktraces:
for idx, frame in enumerate(stacktrace['frames']):
if 'image_addr' not in frame or \
'instruction_addr' not in frame or \
'symbol_addr' not in frame:
continue
try:
sfrm = sym.symbolize_frame({
'object_name': frame.get('package'),
'object_addr': frame['image_addr'],
'instruction_addr': frame['instruction_addr'],
'symbol_addr': frame['symbol_addr'],
}, sdk_info, report_error=report_error)
if not sfrm:
continue
# XXX: log here if symbol could not be found?
frame['function'] = sfrm.get('symbol_name') or \
frame.get('function') or '<unknown>'
frame['abs_path'] = sfrm.get('filename') or None
if frame['abs_path']:
frame['filename'] = posixpath.basename(frame['abs_path'])
if sfrm.get('line') is not None:
frame['lineno'] = sfrm['line']
else:
frame['instruction_offset'] = \
parse_addr(sfrm['instruction_addr']) - \
parse_addr(sfrm['symbol_addr'])
if sfrm.get('column') is not None:
frame['colno'] = sfrm['column']
frame['package'] = sfrm['object_name'] or frame.get('package')
frame['symbol_addr'] = '0x%x' % parse_addr(sfrm['symbol_addr'])
frame['instruction_addr'] = '0x%x' % parse_addr(
sfrm['instruction_addr'])
frame['in_app'] = is_in_app(frame)
longest_addr = max(longest_addr, len(frame['symbol_addr']),
len(frame['instruction_addr']))
processed_frames.append(frame)
except Exception:
logger.exception('Failed to symbolicate')
errors.append({
'type': EventError.NATIVE_INTERNAL_FAILURE,
'error': 'The symbolicator encountered an internal failure',
})
# Pad out addresses to be of the same length
for frame in processed_frames:
for key in 'symbol_addr', 'instruction_addr':
frame[key] = '0x' + frame[key][2:].rjust(longest_addr - 2, '0')
if errors:
data.setdefault('errors', []).extend(errors)
return data
class NativePlugin(Plugin2):
can_disable = False
def get_event_preprocessors(self, **kwargs):
return [preprocess_apple_crash_event, resolve_frame_symbols]
|
|
from __future__ import absolute_import
import mimetypes
import os
import tempfile
import logging
import shutil
import string
import copy
from uuid import uuid4
import errno
from django.apps import apps
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.contrib.auth.models import User, Group
from django.core.files import File
from django.core.files.uploadedfile import UploadedFile
from django.core.files.storage import DefaultStorage
from django.core.validators import validate_email
from mezzanine.conf import settings
from hs_core.signals import pre_create_resource, post_create_resource, pre_add_files_to_resource, \
post_add_files_to_resource
from hs_core.models import AbstractResource, BaseResource, ResourceFile
from hs_core.hydroshare.hs_bagit import create_bag_files
from django_irods.icommands import SessionException
from django_irods.storage import IrodsStorage
from theme.models import QuotaMessage
logger = logging.getLogger(__name__)
class ResourceFileSizeException(Exception):
pass
class ResourceFileValidationException(Exception):
pass
class QuotaException(Exception):
pass
def get_resource_types():
resource_types = []
for model in apps.get_models():
if issubclass(model, AbstractResource) and model != BaseResource:
if not getattr(model, 'archived_model', False):
resource_types.append(model)
return resource_types
def get_resource_instance(app, model_name, pk, or_404=True):
model = apps.get_model(app, model_name)
if or_404:
return get_object_or_404(model, pk=pk)
else:
return model.objects.get(pk=pk)
def get_resource_by_shortkey(shortkey, or_404=True):
try:
res = BaseResource.objects.get(short_id=shortkey)
except BaseResource.DoesNotExist:
if or_404:
raise Http404(shortkey)
else:
raise
content = res.get_content_model()
assert content, (res, res.content_model)
return content
def get_resource_by_doi(doi, or_404=True):
try:
res = BaseResource.objects.get(doi=doi)
except BaseResource.DoesNotExist:
if or_404:
raise Http404(doi)
else:
raise
content = res.get_content_model()
assert content, (res, res.content_model)
return content
def user_from_id(user, raise404=True):
if isinstance(user, User):
return user
try:
tgt = User.objects.get(username=user)
except ObjectDoesNotExist:
try:
tgt = User.objects.get(email=user)
except ObjectDoesNotExist:
try:
tgt = User.objects.get(pk=int(user))
except ValueError:
if raise404:
raise Http404('User not found')
else:
raise User.DoesNotExist
except ObjectDoesNotExist:
if raise404:
raise Http404('User not found')
else:
raise
return tgt
def group_from_id(grp):
if isinstance(grp, Group):
return grp
try:
tgt = Group.objects.get(name=grp)
except ObjectDoesNotExist:
try:
tgt = Group.objects.get(pk=int(grp))
        except (TypeError, ValueError):
            # int() raises ValueError for a non-numeric name, so treat it the
            # same as an unsupported type and fall through to a 404
raise Http404('Group not found')
except ObjectDoesNotExist:
raise Http404('Group not found')
return tgt
def get_user_zone_status_info(user):
"""
This function should be called to determine whether the site is in production and whether user
zone functionality should be enabled or not on the web site front end
Args:
user: the requesting user
Returns:
in_production, enable_user_zone where both are boolean indicating whether the site is
in production and whether user zone functionality should be enabled or not on the web site
front end
"""
if user is None:
return None, None
if not hasattr(user, 'userprofile') or user.userprofile is None:
return None, None
in_production = True if settings.IRODS_USERNAME == settings.HS_WWW_IRODS_PROXY_USER else False
enable_user_zone = user.userprofile.create_irods_user_account
if not in_production and enable_user_zone:
        # user zone selection is only shown when these settings are populated
        # (for example, in a local development environment set up for testing);
        # disable it if any of them are empty
if (not settings.HS_WWW_IRODS_PROXY_USER_PWD or
not settings.HS_WWW_IRODS_HOST or not settings.HS_WWW_IRODS_ZONE):
enable_user_zone = False
return in_production, enable_user_zone
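# Minimal sketch of the early-return contract documented above: callers without an
# authenticated user (or without a profile) simply get (None, None) back.
def _example_get_user_zone_status_info():
    assert get_user_zone_status_info(None) == (None, None)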
def is_federated(homepath):
"""
Check if the selected file via the iRODS browser is from a federated zone or not
Args:
homepath: the logical iRODS file name with full logical path, e.g., selected from
iRODS browser
Returns:
        True if the selected file indicated by homepath is from a federated zone, False otherwise
"""
homepath = homepath.strip()
homepath_list = homepath.split('/')
# homepath is an iRODS logical path in the format of
# /irods_zone/home/irods_account_username/collection_relative_path, so homepath_list[1]
# is the irods_zone which we can use to form the fed_proxy_path to check whether
# fed_proxy_path exists to hold hydroshare resources in a federated zone
if homepath_list[1]:
fed_proxy_path = os.path.join(homepath_list[1], 'home',
settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE)
fed_proxy_path = '/' + fed_proxy_path
else:
# the test path input is invalid, return False meaning it is not federated
return False
if settings.REMOTE_USE_IRODS:
irods_storage = IrodsStorage('federated')
else:
irods_storage = IrodsStorage()
# if the iRODS proxy user in hydroshare zone can list homepath and the federation zone proxy
# user path, it is federated; otherwise, it is not federated
return irods_storage.exists(homepath) and irods_storage.exists(fed_proxy_path)
def get_federated_zone_home_path(filepath):
"""
Args:
filepath: the iRODS data object file path that included zone name in the format of
/zone_name/home/user_name/file_path
Returns:
the zone name extracted from filepath
"""
if filepath and filepath.startswith('/'):
split_path_strs = filepath.split('/')
# the Zone name should follow the first slash
zone = split_path_strs[1]
return '/{zone}/home/{local_proxy_user}'.format(
zone=zone, local_proxy_user=settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE)
else:
return ''
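# Illustrative sketch with a hypothetical zone name; the configured value of
# settings.HS_LOCAL_PROXY_USER_IN_FED_ZONE is appended after '/<zone>/home/'.
def _example_get_federated_zone_home_path():
    assert get_federated_zone_home_path(
        '/czoZone/home/alice/data/site1.csv').startswith('/czoZone/home/')
    # relative or empty paths are rejected with an empty string
    assert get_federated_zone_home_path('') == ''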
# TODO: replace with a cache facility that has automatic cleanup
# TODO: pass a list rather than a string to allow commas in filenames.
def get_fed_zone_files(irods_fnames):
"""
Get the files from iRODS federated zone to Django server for metadata extraction on-demand
for specific resource types
Args:
irods_fnames: the logical iRODS file names with full logical path separated by comma
Returns:
a list of the named temp files which have been copied over to local Django server
or raise exceptions if input parameter is wrong or iRODS operations fail
Note: application must delete these files after use.
"""
ret_file_list = []
if isinstance(irods_fnames, basestring):
ifnames = string.split(irods_fnames, ',')
elif isinstance(irods_fnames, list):
ifnames = irods_fnames
else:
raise ValueError("Input parameter to get_fed_zone_files() must be String or List")
irods_storage = IrodsStorage('federated')
for ifname in ifnames:
fname = os.path.basename(ifname.rstrip(os.sep))
# TODO: this is statistically unique but not guaranteed to be unique.
tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)
tmpfile = os.path.join(tmpdir, fname)
try:
os.makedirs(tmpdir)
except OSError as ex:
if ex.errno == errno.EEXIST:
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
else:
raise Exception(ex.message)
irods_storage.getFile(ifname, tmpfile)
ret_file_list.append(tmpfile)
return ret_file_list
# TODO: make the local cache file (and cleanup) part of ResourceFile state?
def get_file_from_irods(res_file):
"""
Copy the file (res_file) from iRODS (local or federated zone)
over to django (temp directory) which is
necessary for manipulating the file (e.g. metadata extraction).
Note: The caller is responsible for cleaning the temp directory
:param res_file: an instance of ResourceFile
:return: location of the copied file
"""
res = res_file.resource
istorage = res.get_irods_storage()
res_file_path = res_file.storage_path
file_name = os.path.basename(res_file_path)
tmpdir = os.path.join(settings.TEMP_FILE_DIR, uuid4().hex)
tmpfile = os.path.join(tmpdir, file_name)
# TODO: If collisions occur, really bad things happen.
# TODO: Directories are never cleaned up when unused. need cache management.
try:
os.makedirs(tmpdir)
except OSError as ex:
if ex.errno == errno.EEXIST:
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
else:
raise Exception(ex.message)
istorage.getFile(res_file_path, tmpfile)
copied_file = tmpfile
return copied_file
# TODO: should be ResourceFile.replace
def replace_resource_file_on_irods(new_file, original_resource_file, user):
"""
Replaces the specified resource file with file (new_file) by copying to iRODS
(local or federated zone)
:param new_file: file path for the file to be copied to iRODS
:param original_resource_file: an instance of ResourceFile that is to be replaced
:param user: user who is replacing the resource file.
:return:
"""
ori_res = original_resource_file.resource
istorage = ori_res.get_irods_storage()
ori_storage_path = original_resource_file.storage_path
# Note: this doesn't update metadata at all.
istorage.saveFile(new_file, ori_storage_path, True)
# do this so that the bag will be regenerated prior to download of the bag
resource_modified(ori_res, by_user=user, overwrite_bag=False)
# TODO: should be inside ResourceFile, and federation logic should be transparent.
def get_resource_file_name_and_extension(res_file):
"""
Gets the full file name with path, file base name, and extension of the specified resource file
:param res_file: an instance of ResourceFile for which file extension to be retrieved
:return: (full filename with path, full file base name, file extension)
ex: "/my_path_to/ABC.nc" --> ("/my_path_to/ABC.nc", "ABC.nc", ".nc")
"""
f_fullname = res_file.storage_path
f_basename = os.path.basename(f_fullname)
_, file_ext = os.path.splitext(f_fullname)
return f_fullname, f_basename, file_ext
# TODO: should be ResourceFile.url
def get_resource_file_url(res_file):
"""
Gets the download url of the specified resource file
:param res_file: an instance of ResourceFile for which download url is to be retrieved
:return: download url for the resource file
"""
if res_file.resource_file:
f_url = res_file.resource_file.url
elif res_file.fed_resource_file:
f_url = res_file.fed_resource_file.url
else:
f_url = ''
return f_url
# TODO: should be classmethod of ResourceFile
def get_resource_files_by_extension(resource, file_extension):
matching_files = []
for res_file in resource.files.all():
_, _, file_ext = get_resource_file_name_and_extension(res_file)
if file_ext == file_extension:
matching_files.append(res_file)
return matching_files
def get_resource_file_by_name(resource, file_name):
for res_file in resource.files.all():
_, fl_name, _ = get_resource_file_name_and_extension(res_file)
if fl_name == file_name:
return res_file
return None
def get_resource_file_by_id(resource, file_id):
return resource.files.filter(id=file_id).first()
# TODO: This is unnecessary since delete now cascades.
def delete_fed_zone_file(file_name_with_full_path):
'''
Args:
file_name_with_full_path: the absolute full logical path in a federated iRODS zone
Returns:
None, but exceptions will be raised if there is an issue with iRODS delete operation
'''
istorage = IrodsStorage('federated')
istorage.delete(file_name_with_full_path)
def replicate_resource_bag_to_user_zone(user, res_id):
"""
Replicate resource bag to iRODS user zone
Args:
user: the requesting user
res_id: the resource id with its bag to be replicated to iRODS user zone
Returns:
None, but exceptions will be raised if there is an issue with iRODS operation
"""
# do on-demand bag creation
res = get_resource_by_shortkey(res_id)
res_coll = res.root_path
istorage = res.get_irods_storage()
bag_modified = "false"
# needs to check whether res_id collection exists before getting/setting AVU on it to
# accommodate the case where the very same resource gets deleted by another request when
# it is getting downloaded
# TODO: why would we want to do anything at all if the resource does not exist???
if istorage.exists(res_coll):
bag_modified = istorage.getAVU(res_coll, 'bag_modified')
if bag_modified.lower() == "true":
# import here to avoid circular import issue
from hs_core.tasks import create_bag_by_irods
create_bag_by_irods(res_id)
# do replication of the resource bag to irods user zone
if not res.resource_federation_path:
istorage.set_fed_zone_session()
src_file = res.bag_path
# TODO: allow setting destination path
tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
userzone=settings.HS_USER_IRODS_ZONE, username=user.username, resid=res_id)
fsize = istorage.size(src_file)
validate_user_quota(user, fsize)
istorage.copyFiles(src_file, tgt_file)
else:
raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
def copy_resource_files_and_AVUs(src_res_id, dest_res_id):
"""
Copy resource files and AVUs from source resource to target resource including both
on iRODS storage and on Django database
:param src_res_id: source resource uuid
:param dest_res_id: target resource uuid
:return:
"""
avu_list = ['bag_modified', 'metadata_dirty', 'isPublic', 'resourceType']
src_res = get_resource_by_shortkey(src_res_id)
tgt_res = get_resource_by_shortkey(dest_res_id)
# This makes the assumption that the destination is in the same exact zone.
# Also, bags and similar attached files are not copied.
istorage = src_res.get_irods_storage()
# This makes an exact copy of all physical files.
src_files = os.path.join(src_res.root_path, 'data')
# This has to be one segment short of the source because it is a target directory.
dest_files = tgt_res.root_path
istorage.copyFiles(src_files, dest_files)
src_coll = src_res.root_path
tgt_coll = tgt_res.root_path
for avu_name in avu_list:
value = istorage.getAVU(src_coll, avu_name)
# make formerly public things private
if avu_name == 'isPublic':
istorage.setAVU(tgt_coll, avu_name, 'false')
# bag_modified AVU needs to be set to true for copied resource
elif avu_name == 'bag_modified':
istorage.setAVU(tgt_coll, avu_name, 'true')
# everything else gets copied literally
else:
istorage.setAVU(tgt_coll, avu_name, value)
# link copied resource files to Django resource model
files = src_res.files.all()
# if resource files are part of logical files, then logical files also need copying
src_logical_files = list(set([f.logical_file for f in files if f.has_logical_file]))
map_logical_files = {}
for src_logical_file in src_logical_files:
map_logical_files[src_logical_file] = src_logical_file.get_copy()
for n, f in enumerate(files):
folder, base = os.path.split(f.short_path) # strips object information.
new_resource_file = ResourceFile.create(tgt_res, base, folder=folder)
# if the original file is part of a logical file, then
# add the corresponding new resource file to the copy of that logical file
if f.has_logical_file:
tgt_logical_file = map_logical_files[f.logical_file]
tgt_logical_file.add_resource_file(new_resource_file)
if src_res.resource_type.lower() == "collectionresource":
# clone contained_res list of original collection and add to new collection
# note that new collection resource will not contain "deleted resources"
tgt_res.resources = src_res.resources.all()
def copy_and_create_metadata(src_res, dest_res):
"""
Copy metadata from source resource to target resource except identifier, publisher, and date
which need to be created for the target resource as appropriate. This method is used for
resource copying and versioning.
:param src_res: source resource
:param dest_res: target resource
:return:
"""
# copy metadata from source resource to target resource except three elements
exclude_elements = ['identifier', 'publisher', 'date']
dest_res.metadata.copy_all_elements_from(src_res.metadata, exclude_elements)
# create Identifier element that is specific to the new resource
dest_res.metadata.create_element('identifier', name='hydroShareIdentifier',
url='{0}/resource/{1}'.format(current_site_url(),
dest_res.short_id))
# create date element that is specific to the new resource
dest_res.metadata.create_element('date', type='created', start_date=dest_res.created)
dest_res.metadata.create_element('date', type='modified', start_date=dest_res.updated)
# copy date element to the new resource if exists
src_res_valid_date_filter = src_res.metadata.dates.all().filter(type='valid')
if src_res_valid_date_filter:
res_valid_date = src_res_valid_date_filter[0]
dest_res.metadata.create_element('date', type='valid', start_date=res_valid_date.start_date,
end_date=res_valid_date.end_date)
src_res_avail_date_filter = src_res.metadata.dates.all().filter(type='available')
if src_res_avail_date_filter:
res_avail_date = src_res_avail_date_filter[0]
dest_res.metadata.create_element('date', type='available',
start_date=res_avail_date.start_date,
end_date=res_avail_date.end_date)
# create the key/value metadata
dest_res.extra_metadata = copy.deepcopy(src_res.extra_metadata)
dest_res.save()
# TODO: should be BaseResource.mark_as_modified.
def resource_modified(resource, by_user=None, overwrite_bag=True):
"""
Set an AVU flag that forces the bag to be recreated before fetch.
This indicates that some content of the bag has been edited.
"""
resource.last_changed_by = by_user
resource.updated = now().isoformat()
# seems this is the best place to sync resource title with metadata title
resource.title = resource.metadata.title.value
resource.save()
if resource.metadata.dates.all().filter(type='modified'):
res_modified_date = resource.metadata.dates.all().filter(type='modified')[0]
resource.metadata.update_element('date', res_modified_date.id)
if overwrite_bag:
create_bag_files(resource)
# set bag_modified-true AVU pair for the modified resource in iRODS to indicate
# the resource is modified for on-demand bagging.
set_dirty_bag_flag(resource)
# TODO: should be part of BaseResource
def set_dirty_bag_flag(resource):
"""
Set bag_modified=true AVU pair for the modified resource in iRODS
to indicate that the resource is modified for on-demand bagging.
set metadata_dirty (AVU) to 'true' to indicate that metadata has been modified for the
resource so that xml metadata files need to be generated on-demand
This is done so that the bag creation can be "lazy", in the sense that the
bag is recreated only after multiple changes to the bag files, rather than
after each change. It is created when someone attempts to download it.
"""
    res_coll = resource.root_path
    istorage = resource.get_irods_storage()
istorage.setAVU(res_coll, "bag_modified", "true")
istorage.setAVU(res_coll, "metadata_dirty", "true")
def _validate_email(email):
try:
validate_email(email)
return True
except ValidationError:
return False
def get_profile(user):
return user.userprofile
def current_site_url():
"""Returns fully qualified URL (no trailing slash) for the current site."""
from django.contrib.sites.models import Site
current_site = Site.objects.get_current()
protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'http')
port = getattr(settings, 'MY_SITE_PORT', '')
url = '%s://%s' % (protocol, current_site.domain)
if port:
url += ':%s' % port
return url
def get_file_mime_type(file_name):
# TODO: looks like the mimetypes module can't find all mime types
    # We may need to use the python magic module instead
file_name = u"{}".format(file_name)
file_format_type = mimetypes.guess_type(file_name)[0]
if not file_format_type:
# TODO: this is probably not the right way to get the mime type
file_format_type = 'application/%s' % os.path.splitext(file_name)[1][1:]
return file_format_type
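# Usage sketch: a well-known extension is resolved by the mimetypes module (here assumed
# to map '.tif' to 'image/tiff'), while an extension missing from the local mimetypes
# registry (assumed for '.foo') falls back to the 'application/<extension>' form noted
# in the TODO above.
def _example_get_file_mime_type():
    assert get_file_mime_type('dem.tif') == 'image/tiff'
    assert get_file_mime_type('observations.foo') == 'application/foo'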
def check_file_dict_for_error(file_validation_dict):
if 'are_files_valid' in file_validation_dict:
if not file_validation_dict['are_files_valid']:
error_message = file_validation_dict.get('message',
"Uploaded file(s) failed validation.")
raise ResourceFileValidationException(error_message)
def raise_file_size_exception():
from .resource import FILE_SIZE_LIMIT_FOR_DISPLAY
error_msg = 'The resource file is larger than the supported size limit: %s.' \
% FILE_SIZE_LIMIT_FOR_DISPLAY
raise ResourceFileSizeException(error_msg)
def validate_resource_file_size(resource_files):
from .resource import check_resource_files
valid, size = check_resource_files(resource_files)
if not valid:
raise_file_size_exception()
# if no exception, return the total size of all files
return size
def validate_resource_file_type(resource_cls, files):
supported_file_types = resource_cls.get_supported_upload_file_types()
# see if file type checking is needed
if '.*' in supported_file_types:
# all file types are supported
return
supported_file_types = [x.lower() for x in supported_file_types]
for f in files:
file_ext = os.path.splitext(f.name)[1]
if file_ext.lower() not in supported_file_types:
err_msg = "{file_name} is not a supported file type for {res_type} resource"
err_msg = err_msg.format(file_name=f.name, res_type=resource_cls)
raise ResourceFileValidationException(err_msg)
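# Sketch of the validation failure path using hypothetical stand-ins for a resource class
# and an uploaded file; only the two attributes the validator touches are provided.
def _example_validate_resource_file_type():
    class _FakeRasterResource(object):
        @classmethod
        def get_supported_upload_file_types(cls):
            return ('.tif', '.zip')
    class _FakeUpload(object):
        name = 'dem.img'
    try:
        validate_resource_file_type(_FakeRasterResource, [_FakeUpload()])
        raised = False
    except ResourceFileValidationException:
        raised = True
    assert raised  # '.img' is not among the supported types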
def validate_resource_file_count(resource_cls, files, resource=None):
if len(files) > 0:
if len(resource_cls.get_supported_upload_file_types()) == 0:
err_msg = "Content files are not allowed in {res_type} resource"
err_msg = err_msg.format(res_type=resource_cls)
raise ResourceFileValidationException(err_msg)
err_msg = "Multiple content files are not supported in {res_type} resource"
err_msg = err_msg.format(res_type=resource_cls)
if len(files) > 1:
if not resource_cls.allow_multiple_file_upload():
raise ResourceFileValidationException(err_msg)
if resource is not None and resource.files.all().count() > 0:
if not resource_cls.can_have_multiple_files():
raise ResourceFileValidationException(err_msg)
def convert_file_size_to_unit(size, unit):
"""
Convert file size to unit for quota comparison
:param size: in byte unit
:param unit: should be one of the four: 'KB', 'MB', 'GB', or 'TB'
    :return: the size converted to the passed-in unit
"""
unit = unit.lower()
if unit not in ('kb', 'mb', 'gb', 'tb'):
raise ValidationError('Pass-in unit for file size conversion must be one of KB, MB, GB, '
'or TB')
factor = 1024.0
kbsize = size / factor
if unit == 'kb':
return kbsize
mbsize = kbsize / factor
if unit == 'mb':
return mbsize
gbsize = mbsize / factor
if unit == 'gb':
return gbsize
tbsize = gbsize / factor
if unit == 'tb':
return tbsize
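# Worked example of the byte-to-unit conversion above (sizes are hypothetical):
# 2048 bytes -> 2.0 KB, and one binary gigabyte (1024 ** 3 bytes) -> 1.0 GB.
def _example_convert_file_size_to_unit():
    assert convert_file_size_to_unit(2048, 'KB') == 2.0
    assert convert_file_size_to_unit(1024 ** 3, 'gb') == 1.0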
def validate_user_quota(user, size):
"""
validate to make sure the user is not over quota with the newly added size
:param user: the user to be validated
:param size: the newly added file size to add on top of the user's used quota to be validated.
size input parameter should be in byte unit
:return: raise exception for the over quota case
"""
if user:
# validate it is within quota hard limit
uq = user.quotas.filter(zone='myhpom_internal').first()
if uq:
if not QuotaMessage.objects.exists():
QuotaMessage.objects.create()
qmsg = QuotaMessage.objects.first()
hard_limit = qmsg.hard_limit_percent
used_size = uq.add_to_used_value(size)
used_percent = uq.used_percent
rounded_percent = round(used_percent, 2)
rounded_used_val = round(used_size, 4)
if used_percent >= hard_limit or uq.remaining_grace_period == 0:
msg_template_str = '{}{}\n\n'.format(qmsg.enforce_content_prepend,
qmsg.content)
msg_str = msg_template_str.format(email=user.email,
used=rounded_percent,
unit=uq.unit,
allocated=uq.allocated_value,
zone=uq.zone,
percent=rounded_percent)
raise QuotaException(msg_str)
def resource_pre_create_actions(resource_type, resource_title, page_redirect_url_key,
files=(), source_names=[], metadata=None,
requesting_user=None, **kwargs):
    from .resource import check_resource_type
from hs_core.views.utils import validate_metadata
if __debug__:
assert(isinstance(source_names, list))
if not resource_title:
resource_title = 'Untitled resource'
else:
resource_title = resource_title.strip()
if len(resource_title) == 0:
resource_title = 'Untitled resource'
resource_cls = check_resource_type(resource_type)
if len(files) > 0:
size = validate_resource_file_size(files)
validate_resource_file_count(resource_cls, files)
validate_resource_file_type(resource_cls, files)
# validate it is within quota hard limit
validate_user_quota(requesting_user, size)
if not metadata:
metadata = []
else:
validate_metadata(metadata, resource_type)
page_url_dict = {}
    # this is needed since raster and feature resource types allow uploading a zip file,
    # which is then replaced with the exploded files. If the zip file is loaded from a
    # hydroshare federation zone, the original zip file encoded in source_names gets deleted
    # in this case and fed_res_path is used to keep the federation path, so that the resource
    # will be stored in the federated zone rather than the hydroshare zone
fed_res_path = []
# receivers need to change the values of this dict if file validation fails
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
# Send pre-create resource signal - let any other app populate the empty metadata list object
# also pass title to other apps, and give other apps a chance to populate page_redirect_url
# if they want to redirect to their own page for resource creation rather than use core
# resource creation code
pre_create_resource.send(sender=resource_cls, metadata=metadata, files=files,
title=resource_title,
url_key=page_redirect_url_key, page_url_dict=page_url_dict,
validate_files=file_validation_dict,
source_names=source_names,
user=requesting_user, fed_res_path=fed_res_path, **kwargs)
if len(files) > 0:
check_file_dict_for_error(file_validation_dict)
return page_url_dict, resource_title, metadata, fed_res_path
def resource_post_create_actions(resource, user, metadata, **kwargs):
# receivers need to change the values of this dict if file validation fails
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
# Send post-create resource signal
post_create_resource.send(sender=type(resource), resource=resource, user=user,
metadata=metadata,
validate_files=file_validation_dict, **kwargs)
check_file_dict_for_error(file_validation_dict)
def prepare_resource_default_metadata(resource, metadata, res_title):
add_title = True
for element in metadata:
if 'title' in element:
if 'value' in element['title']:
res_title = element['title']['value']
add_title = False
else:
metadata.remove(element)
break
if add_title:
metadata.append({'title': {'value': res_title}})
add_language = True
for element in metadata:
if 'language' in element:
if 'code' in element['language']:
add_language = False
else:
metadata.remove(element)
break
if add_language:
metadata.append({'language': {'code': 'eng'}})
add_rights = True
for element in metadata:
if 'rights' in element:
if 'statement' in element['rights'] and 'url' in element['rights']:
add_rights = False
else:
metadata.remove(element)
break
if add_rights:
# add the default rights/license element
statement = 'This resource is shared under the Creative Commons Attribution CC BY.'
url = 'http://creativecommons.org/licenses/by/4.0/'
metadata.append({'rights': {'statement': statement, 'url': url}})
metadata.append({'identifier': {'name': 'hydroShareIdentifier',
'url': '{0}/resource/{1}'.format(current_site_url(),
resource.short_id)}})
# remove if there exists the 'type' element as system generates this element
# remove if there exists 'format' elements - since format elements are system generated based
# on resource content files
# remove any 'date' element which is not of type 'valid'. All other date elements are
# system generated
for element in list(metadata):
if 'type' in element or 'format' in element:
metadata.remove(element)
if 'date' in element:
if 'type' in element['date']:
if element['date']['type'] != 'valid':
metadata.remove(element)
metadata.append({'type': {'url': '{0}/terms/{1}'.format(current_site_url(),
resource.__class__.__name__)}})
metadata.append({'date': {'type': 'created', 'start_date': resource.created}})
metadata.append({'date': {'type': 'modified', 'start_date': resource.updated}})
# only add the resource creator as the creator for metadata if there is not already
# creator data in the metadata object
metadata_keys = [element.keys()[0].lower() for element in metadata]
if 'creator' not in metadata_keys:
creator_data = get_party_data_from_user(resource.creator)
metadata.append({'creator': creator_data})
def get_party_data_from_user(user):
party_data = {}
user_profile = get_profile(user)
user_full_name = user.get_full_name()
if user_full_name:
party_name = user_full_name
else:
party_name = user.username
party_data['name'] = party_name
party_data['email'] = user.email
party_data['description'] = '/hydroshare/user/{uid}/'.format(uid=user.pk)
party_data['phone'] = user_profile.phone_1
party_data['organization'] = user_profile.organization
return party_data
# TODO: make this part of resource api. resource --> self.
def resource_file_add_pre_process(resource, files, user, extract_metadata=False,
source_names=[], **kwargs):
if __debug__:
assert(isinstance(source_names, list))
resource_cls = resource.__class__
if len(files) > 0:
size = validate_resource_file_size(files)
validate_user_quota(resource.get_quota_holder(), size)
validate_resource_file_type(resource_cls, files)
validate_resource_file_count(resource_cls, files, resource)
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
pre_add_files_to_resource.send(sender=resource_cls, files=files, resource=resource, user=user,
source_names=source_names,
validate_files=file_validation_dict,
extract_metadata=extract_metadata, **kwargs)
check_file_dict_for_error(file_validation_dict)
# TODO: make this part of resource api. resource --> self.
def resource_file_add_process(resource, files, user, extract_metadata=False,
source_names=[], **kwargs):
from .resource import add_resource_files
if __debug__:
assert(isinstance(source_names, list))
folder = kwargs.pop('folder', None)
resource_file_objects = add_resource_files(resource.short_id, *files, folder=folder,
source_names=source_names)
# receivers need to change the values of this dict if file validation fails
# in case of file validation failure it is assumed the resource type also deleted the file
file_validation_dict = {'are_files_valid': True, 'message': 'Files are valid'}
post_add_files_to_resource.send(sender=resource.__class__, files=files,
source_names=source_names,
resource=resource, user=user,
validate_files=file_validation_dict,
extract_metadata=extract_metadata,
res_files=resource_file_objects, **kwargs)
check_file_dict_for_error(file_validation_dict)
resource_modified(resource, user, overwrite_bag=False)
return resource_file_objects
# TODO: move this to BaseResource
def create_empty_contents_directory(resource):
res_contents_dir = resource.file_path
istorage = resource.get_irods_storage()
if not istorage.exists(res_contents_dir):
istorage.session.run("imkdir", None, '-p', res_contents_dir)
def add_file_to_resource(resource, f, folder=None, source_name='',
move=False):
"""
Add a ResourceFile to a Resource. Adds the 'format' metadata element to the resource.
:param resource: Resource to which file should be added
:param f: File-like object to add to a resource
:param source_name: the logical file name of the resource content file for
federated iRODS resource or the federated zone name;
By default, it is empty. A non-empty value indicates
the file needs to be added into the federated zone, either
from local disk where f holds the uploaded file from local
disk, or from the federated zone directly where f is empty
but source_name has the whole data object
iRODS path in the federated zone
:param move: indicate whether the file should be copied or moved from private user
account to proxy user account in federated zone; A value of False
indicates copy is needed, a value of True indicates no copy, but
the file will be moved from private user account to proxy user account.
The default value is False.
:return: The identifier of the ResourceFile added.
"""
# importing here to avoid circular import
from hs_file_types.models import GenericLogicalFile
if f:
openfile = File(f) if not isinstance(f, UploadedFile) else f
ret = ResourceFile.create(resource, openfile, folder=folder, source=None, move=False)
# add format metadata element if necessary
file_format_type = get_file_mime_type(f.name)
elif source_name:
try:
# create from existing iRODS file
ret = ResourceFile.create(resource, None, folder=folder, source=source_name, move=move)
except SessionException as ex:
try:
ret.delete()
except Exception:
pass
# raise the exception for the calling function to inform the error on the page interface
raise SessionException(ex.exitcode, ex.stdout, ex.stderr)
# add format metadata element if necessary
file_format_type = get_file_mime_type(source_name)
else:
raise ValueError('Invalid input parameter is passed into this add_file_to_resource() '
'function')
# TODO: generate this from data in ResourceFile rather than extension
if file_format_type not in [mime.value for mime in resource.metadata.formats.all()]:
resource.metadata.create_element('format', value=file_format_type)
# if a file gets added successfully to composite resource, then better to set the generic
# logical file here
if resource.resource_type == "CompositeResource":
logical_file = GenericLogicalFile.create()
ret.logical_file_content_object = logical_file
ret.save()
return ret
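# Usage sketch (hypothetical objects): add an uploaded file, or register an
# existing federated iRODS data object by its logical path instead of uploading.
# res_file = add_file_to_resource(resource, uploaded_file, folder='data')
# fed_file = add_file_to_resource(resource, None,
#                                 source_name='/fedZone/home/proxy/data.csv')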
def add_metadata_element_to_xml(root, md_element, md_fields):
"""
helper function to generate xml elements for a given metadata element that belongs to
'hsterms' namespace
:param root: the xml document root element to which xml elements for the specified
metadata element needs to be added
:param md_element: the metadata element object. The term attribute of the metadata
element object is used for naming the root xml element for this metadata element.
If the root xml element needs to be named differently, then this needs to be a tuple
with first element being the metadata element object and the second being the name
for the root element.
Example:
md_element=self.Creator # the term attribute of the Creator object will be used
md_element=(self.Creator, 'Author') # 'Author' will be used
:param md_fields: a list of attribute names of the metadata element (if the name to be used
in generating the xml element name is same as the attribute name then include the
attribute name as a list item. if xml element name needs to be different from the
attribute name then the list item must be a tuple with first element of the tuple being
the attribute name and the second element being what will be used in naming the xml
element)
Example:
[('first_name', 'firstName'), 'phone', 'email']
# xml sub-elements names: firstName, phone, email
"""
from lxml import etree
from hs_core.models import CoreMetaData
name_spaces = CoreMetaData.NAMESPACES
if isinstance(md_element, tuple):
element_name = md_element[1]
md_element = md_element[0]
else:
element_name = md_element.term
hsterms_newElem = etree.SubElement(root,
"{{{ns}}}{new_element}".format(
ns=name_spaces['hsterms'],
new_element=element_name))
hsterms_newElem_rdf_Desc = etree.SubElement(
hsterms_newElem, "{{{ns}}}Description".format(ns=name_spaces['rdf']))
for md_field in md_fields:
if isinstance(md_field, tuple):
field_name = md_field[0]
xml_element_name = md_field[1]
else:
field_name = md_field
xml_element_name = md_field
if hasattr(md_element, field_name):
attr = getattr(md_element, field_name)
if attr:
field = etree.SubElement(hsterms_newElem_rdf_Desc,
"{{{ns}}}{field}".format(ns=name_spaces['hsterms'],
field=xml_element_name))
field.text = str(attr)
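# Minimal calling sketch for add_metadata_element_to_xml (the 'creator' object and
# 'root' element here are hypothetical; real ones come from hs_core metadata models):
# add_metadata_element_to_xml(root, (creator, 'Author'),
#                             [('first_name', 'firstName'), 'phone', 'email'])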
class ZipContents(object):
"""
Extract the contents of a zip file one file at a time
using a generator.
"""
def __init__(self, zip_file):
self.zip_file = zip_file
def black_list_path(self, file_path):
return file_path.startswith('__MACOSX/')
def black_list_name(self, file_name):
return file_name == '.DS_Store'
def get_files(self):
temp_dir = tempfile.mkdtemp()
try:
file_path = None
for name_path in self.zip_file.namelist():
if not self.black_list_path(name_path):
name = os.path.basename(name_path)
if name != '':
if not self.black_list_name(name):
self.zip_file.extract(name_path, temp_dir)
file_path = os.path.join(temp_dir, name_path)
logger.debug("Opening {0} as File with name {1}".format(file_path,
name_path))
f = File(file=open(file_path, 'rb'),
name=name_path)
f.size = os.stat(file_path).st_size
yield f
finally:
shutil.rmtree(temp_dir)
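# Usage sketch: wrap an open zipfile.ZipFile and iterate its members as django
# File objects (macOS metadata entries are skipped by the blacklist checks).
# import zipfile
# with zipfile.ZipFile('upload.zip') as zf:
#     for f in ZipContents(zf).get_files():
#         add_file_to_resource(resource, f)  # 'resource' is hypothetical here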
def get_file_storage():
return IrodsStorage() if getattr(settings, 'USE_IRODS', False) else DefaultStorage()
def resolve_request(request):
if request.POST:
return request.POST
if request.data:
return request.data
return {}
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018 Audun Gravdal Johansen
"""Classes to operate on SESAM structural data.
"""
from __future__ import division
import numpy as np
from .sifdata import SifData
from .helpers import getrow
from ..exceptions import HierarchyError, NoSuchRecordError, ResultError
class StrucData(SifData):
"""Base class for the StrucData classes.
"""
def __init__(self, tbgroup, fileinstance, toplevel=None):
"""
Parameters
----------
toplevel : TopLevelData
If this superelement is part of a superelement hierarchy and is not
the top level superelement, provide the top level data object
"""
super(StrucData, self).__init__(tbgroup, fileinstance)
self._toplevel = toplevel
def _get_setmember_indices(self, setnames, istype=2):
"""Get (zero-based) indices of set members.
if istype=1: indices are into the gnode/gcoord tables (nodes),
if istype=2 (default): indices are into the gelmnt1/gelref1 tables
(elements)
"""
tdsetnam, tdsetnam_text = self._get_record('tdsetnam')
gsetmemb, irmemb = self._get_record('gsetmemb')
setnames = (setnames,) if isinstance(setnames, str) else setnames
setmembs = []
for setname in setnames:
cond = 'name==b{}'.format(repr(setname))
isref = getrow(tdsetnam, cond)['idno']
cond = '(isref=={})&(istype=={})'.format(isref, istype)
for r in gsetmemb.where(cond):
setmembs.append(irmemb[r['irmemb_start']:r['irmemb_stop']])
return np.unique(np.concatenate(setmembs)) - 1
def _get_elementkind_indices(self, kind, elemindices=None):
"""Get (zero-based) indices of elements of a certain *kind*.
Legal alternatives for *kind* are 'beam' and 'shell'
Optionally provide *elemindices* which can be a subset of the complete
set of element indices.
"""
gelmnt1, nodin = self._get_record('gelmnt1')
# # This is slower:
# if kind == 'beam':
# cond = '(eltyp==15) | (eltyp==23)'
# elif kind == 'shell':
# cond = '(eltyp==24) | (eltyp==25) | (eltyp==26) | (eltyp==28)'
# else:
# raise ValueError("kind must be 'beam' or 'shell'")
# return gelmnt1.get_where_list(cond)
if elemindices is not None:
arr = gelmnt1[:][elemindices]
else:
arr = gelmnt1[:]
if kind == 'beam':
return arr[(arr['eltyp']==15) | (arr['eltyp']==23)]['elno'] - 1
elif kind == 'shell':
return arr[(arr['eltyp']==24) | (arr['eltyp']==25) |
(arr['eltyp']==26) | (arr['eltyp']==28) ]['elno'] - 1
else:
raise ValueError("kind must be 'beam' or 'shell'")
def _get_elementindices(self, sets=None, kind=None):
"""
"""
if sets:
elemindices = self._get_setmember_indices(sets)
if sets and kind:
elemindices = self._get_elementkind_indices(kind, elemindices)
elif kind:
elemindices = self._get_elementkind_indices(kind)
elif not sets:
gelmnt1, nodin = self._get_record('gelmnt1')
elemindices = gelmnt1.col('elno') - 1
return elemindices
def _get_connectivity(self, elemindices):
"""Get connectivity for elements given by *elemindices*.
Returns a tuple *(con, offset)* where *con* is a 1d array with
connectivity (nodeindices) for each element given sequentially.
*offset* is a 1d array with indices into *con* giving the start index
for each element.
"""
gelmnt1, nodin = self._get_record('gelmnt1')
con, offset = self._get_varlendata(gelmnt1, nodin, elemindices)
con -= 1 # change to zero-based
return con, offset
def _update_connectivity(self, con):
"""When elements are a subset (i.e. using sets or kind), the
connectivity need to be updated to match the node array for the subset.
"""
nodeindices = np.unique(con)
indexmap = dict(zip(nodeindices, np.arange(len(nodeindices))))
indexmapper = np.vectorize(lambda x: indexmap[x])
return indexmapper(con)
def _get_nodeindices_ofelems(self, elemindices, disconnected=False):
"""Get (zero-based) indices (into gnode/gcoord table) of nodes used by
elements given by *elemindices*.
"""
con, offset = self._get_connectivity(elemindices)
if disconnected:
return con
else:
return np.unique(con)
def _get_nodeindices(self, sets=None, kind=None, disconnected=False):
"""Get (zero-based) indices into gnode/gcoord table
"""
gcoord = self._get_record('gcoord')
gelmnt1, nodin = self._get_record('gelmnt1')
if sets or kind:
elemindices = self._get_elementindices(sets, kind)
con, _ = self._get_connectivity(elemindices)
# con = self._update_connectivity(con)
if disconnected:
return con
else:
return np.unique(con)
else:
if disconnected:
return nodin[:] - 1
else:
return np.arange(len(gcoord))
def _get_resrefs(self, run=1, rescases=None):
"""Returns resref records for the given run and external result case
number(s) as a numpy structured array. *rescases* can be None (default,
returns all), int or sequence of ints.
"""
rdresref, reftyps = self._get_record('rdresref')
if isinstance(rescases, (list, tuple, np.ndarray)):
rescases = set(rescases)
elif isinstance(rescases, int):
rescases = set([rescases])
elif rescases is None:
indices = rdresref.get_where_list('irno=={}'.format(run))
return rdresref[indices]
resrefs = []
for row in rdresref.iterrows():
if row['irno'] == run and row['ieres'] in rescases:
resrefs.append(row.fetch_all_fields())
return np.array(resrefs)
def _check_complex_and_get_flag(self, complexflags):
complexflag = complexflags[0]
if not np.all(complexflags == complexflag):
raise ResultError(
'Result cases must all be either real or complex')
return complexflag
def get_setnames(self):
"""Get names of all sets in superelement.Returns None if no sets exist
"""
try:
return list(self._get_record('tdsetnam')[0].col('name'))
except NoSuchRecordError:
pass
class HigherLevelData(StrucData):
"""Base class for higher level structural data
"""
# this class should contain many of the same methods as FirstLevelData,
# but with the effect that it calls the method on all children and returns
# a dict of data..
# should this dict be nested? or skip intermediate levels
# is there a difference between input data and results data?
# should call methods directly on FirstLevelData and use hierarchy data to
# resolve which objects to call. This to have uniform behaviour for input
# and results data (as intermediate levels are not physically present on
# results data)
# an optional 'only' argument to filter which superelements to get data
# from
    # the 'sets' argument probably doesn't make sense... or does it? Let's keep
    # it: if any of the sets are present on a superelement, data is returned;
    # if not, no data is returned for that superelement
def get_nodes(self, only=None, sets=None, index=1, trans=None,
concepts=False, disconnected=False):
pass
class TopLevelData(HigherLevelData):
"""
"""
# hierarchy and transformation data should be kept/gotten here..
def get_transformation(self, seltyp, index, level='top'):
"""Get transformation matrix from 1stlevel to level(int or 'top')
"""
hierarch, ihsref = self._get_record('hierarch')
hsuptran = self._get_record('hsuptran')
toplevel = hierarch[0]['islevl']
if level == 'top':
level = toplevel
elif level == 1:
return np.diag(np.ones(4, 'f'))
elif level > toplevel:
raise HierarchyError('level {} does not exist, toplevel is '
'{}'.format(level, toplevel))
elif level < 1:
raise HierarchyError('level must be 1 or above, got '
'{}'.format(level))
cond = '(iselty=={})&(indsel=={})'.format(seltyp, index)
r = getrow(hierarch, cond)
t = getrow(hsuptran, 'itref=={}'.format(r['itref']))['t']
rp = getrow(hierarch, 'ihref=={}'.format(r['ihpref']))
if rp['islevl'] > level:
raise HierarchyError('Superelement {} with index {} not present on'
' level {}'.format(seltyp, index, level))
while rp['islevl'] < level:
t2 = getrow(hsuptran, 'itref=={}'.format(rp['itref']))['t']
t = np.dot(t, t2)
if rp['ihpref'] != 0:
rp = getrow(hierarch, 'ihref=={}'.format(rp['ihpref']))
if rp['islevl'] > level:
raise HierarchyError('Superelement {} with index {} not '
'present on level {}'.format(seltyp,
index,
level))
return t
class InterLevelData(HigherLevelData):
"""
"""
pass
class FirstLevelData(StrucData):
"""
"""
# extrapolation matrices for 2nd order elements
s3 = np.sqrt(3)
# 8-node quads
xquad = np.array(
[[(1+s3)*(1+s3), (1-s3)*(1+s3), (1+s3)*(1-s3), (1-s3)*(1-s3)],
[(1+s3), (1+s3), (1-s3), (1-s3) ],
[(1-s3)*(1+s3), (1+s3)*(1+s3), (1-s3)*(1-s3), (1+s3)*(1-s3)],
[(1-s3), (1+s3), (1-s3), (1+s3) ],
[(1-s3)*(1-s3), (1+s3)*(1-s3), (1-s3)*(1+s3), (1+s3)*(1+s3)],
[(1-s3), (1-s3), (1+s3), (1+s3) ],
[(1+s3)*(1-s3), (1-s3)*(1-s3), (1+s3)*(1+s3), (1-s3)*(1+s3)],
[(1+s3), (1-s3), (1+s3), (1-s3) ]]) * .25
# 6-node triangles
xtri = np.array([[ 5./3, -1./3, -1./3],
[ 2./3, 2./3, -1./3],
[-1./3, 5./3, -1./3],
[-1./3, 2./3, 2./3],
[-1./3, -1./3, 5./3],
[ 2./3, -1./3, 2./3]])
# 3-node beam
xbm3 = np.array([[1. + s3, 1. - s3],
[1. , 1. ],
[1. - s3, 1. + s3]]) * .5
# index arrays for 1st order shells
# (indices of result points located at nodes)
idxtri = np.array([0, 4, 1, 5, 3, 7])
idxquad = np.array([0, 5, 1, 6, 4, 9, 3, 8])
    # transformation matrices for decomposition of stresses
# 1st order plates
x_decomp_thin = np.array([
[ 0.5, 0. , 0. , 0.5, 0. , 0. ],
[ 0. , 0.5, 0. , 0. , 0.5, 0. ],
[ 0. , 0. , 0.5, 0. , 0. , 0.5],
[-0.5, 0. , 0. , 0.5, 0. , 0. ],
[ 0. , -0.5, 0. , 0. , 0.5, 0. ],
[ 0. , 0. , -0.5, 0. , 0. , 0.5]])
# 2nd order shells
x_decomp_thick = np.array([
[ 0.5, 0. , 0. , 0. , 0. , 0.5, 0. , 0. , 0. , 0. ],
[ 0. , 0.5, 0. , 0. , 0. , 0. , 0.5, 0. , 0. , 0. ],
[ 0. , 0. , 0.5, 0. , 0. , 0. , 0. , 0.5, 0. , 0. ],
[-0.5, 0. , 0. , 0. , 0. , 0.5, 0. , 0. , 0. , 0. ],
[ 0. , -0.5, 0. , 0. , 0. , 0. , 0.5, 0. , 0. , 0. ],
[ 0. , 0. , -0.5, 0. , 0. , 0. , 0. , 0.5, 0. , 0. ],
[ 0. , 0. , 0. , 1.5, 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 1.5, 0. , 0. , 0. , 0. , 0. ]])
def __init__(self, tbgroup, toplevel=None):
super(FirstLevelData, self).__init__(tbgroup, toplevel)
if self._filetype == 'R':
self._index = tbgroup._f_getattr('INDEX')
# setup dict of element result processing functions
self._process_eltyp = {15 : self._process_eltyp15,
23 : self._process_eltyp23,
24 : self._process_eltyp24,
25 : self._process_eltyp25,
26 : self._process_eltyp26,
28 : self._process_eltyp28}
def _process_element_results(self, eltyp, restype, pos, d):
"""
"""
return self._process_eltyp[eltyp](restype, pos, d)
def _process_eltyp15(self, restype, pos, d):
"""BEAS, 3D Beam (2-node)"""
# it seems this element has 3 result points.
# return first and last...
return d.reshape(3, 6)[[0,2],:]
def _process_eltyp23(self, restype, pos, d):
"""BTSS, General Curved Beam (3-node)"""
d.shape = (2, 6)
if pos == 'nodes':
return np.dot(self.xbm3, d)
return d
def _process_eltyp24(self, restype, pos, d):
"""FQUS, Flat Quadrilateral Thin Shell (4-node)"""
d = d.reshape(10, 3)[self.idxquad,:].reshape(4, 2, 3)
if restype == 'decomposedstress':
d = self._decompose_stresses_thin(d) # shape is (4, 6)
return d
def _process_eltyp25(self, restype, pos, d):
"""FTRS, Flat Triangular Thin Shell (3-node)"""
d = d.reshape(8, 3)[self.idxtri,:].reshape(3, 2, 3)
if restype == 'decomposedstress':
            d = self._decompose_stresses_thin(d)  # shape is (3, 6)
return d
def _process_eltyp26(self, restype, pos, d):
"""SCTS, Subparametric Curved Triangular Thick Shell (6-node)"""
d.shape = (2, 3, 5) # (side, respt, comp)
if pos == 'respts':
return np.rollaxis(d, 1)
elif pos == 'nodes':
d[...,:3] = self._extrapolate_to_surface(d[...,:3])
if restype == 'generalstress':
d[...,3:] = 0.
elif restype == 'decomposedstress':
                d = self._decompose_stresses_thick(d)  # shape is (3, 8)
# extrapolate to nodes:
            return np.dot(self.xtri, d)  # shape is (6, 2, 5), or (6, 8) if decomposed
def _process_eltyp28(self, restype, pos, d):
"""SCQS, Subparametric Curved Quadrilateral Thick Shell (8-node)"""
d.shape = (2, 4, 5) # (side, respt, comp)
if pos == 'respts':
return np.rollaxis(d, 1)
elif pos == 'nodes':
d[...,:3] = self._extrapolate_to_surface(d[...,:3])
if restype == 'generalstress':
d[...,3:] = 0.
elif restype == 'decomposedstress':
d = self._decompose_stresses_thick(d) # shape is (4, 8)
# extrapolate to nodes:
            return np.dot(self.xquad, d)  # shape is (8, 2, 5), or (8, 8) if decomposed
def _extrapolate_to_surface(self, d):
lp, up = d[0], d[1]
a = self.s3 / 3
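        # result points sit at t = -a and t = +a with a = 1/sqrt(3); the two
        # lines below are linear extrapolation of (lp, up) to the surfaces
        # t = -1 and t = +1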
ls = lp + (-1 + a) / (2*a) * (up - lp)
us = lp + (1 + a) / (2*a) * (up - lp)
return np.array((ls, us))
def _decompose_stresses_thick(self, d):
return np.dot(self.x_decomp_thick, np.rollaxis(d,1).reshape(-1,10).T).T
def _decompose_stresses_thin(self, d):
return np.dot(self.x_decomp_thin, d.reshape(-1,6).T).T
def get_nodes(self, sets=None, kind=None, disconnected=False, trans=None,
index=1):
"""Get node coordinates.
Parameters
----------
sets : str or sequence of str
set name or sequence of set names. If sets=None (default), all
            nodes in the superelement are returned.
kind : str
'beam', 'shell' or None (None is default, returns all kinds)
disconnected : bool
if True, elements do not share nodes. Instead each element has its
own set of nodes. This results in more nodes than in the original
data.
trans : numpy.ndarray, int or 'top'
| four calling patterns are supported:
| 1: provide a 4x4 transformation matrix
| 2: provide the hierarchy level (int)
| 3: provide 'top' to specify the top level
| 4: None (default): No transformation is applied
index : int
superelement index number. Relevant when applying hierarchy
transformations (T-files only). Default is 1.
Returns
-------
coords : numpy.ndarray
A 2d array with shape (nnodes, 3), where the second axis represent
the x, y and z coordinates.
"""
# index is redundant for resultsdata and non-hierarchy inputdata
gcoord = self._get_record('gcoord')
nodeindices = self._get_nodeindices(sets, kind, disconnected)
recs = gcoord[:].take(nodeindices, axis=0) # is allegedly faster
# add a column of ones in order to perform affine transformation
coords = np.column_stack((recs['xcoord'],
recs['ycoord'],
recs['zcoord'],
np.ones(len(recs), 'f')))
# transformation
if trans is not None:
if self._filetype == 'R':
index = self._index
if isinstance(trans, np.ndarray):
if trans.shape != (4,4):
raise ValueError(
'Transformation matrix must have shape (4,4), got '
'({},{})'.format(*trans.shape))
elif self._toplevel:
if isinstance(trans, str):
if trans == 'top':
trans = self._toplevel.get_transformation(self._seltyp,
index, 'top')
else:
raise ValueError('legal string argument: "top"')
elif isinstance(trans, int):
trans = self._toplevel.get_transformation(self._seltyp,
index, trans)
elif self._has_record('rsuptran') and trans=='top':
trans = self._get_record('rsuptran')[0]['t']
else:
raise HierarchyError(
'{0}: No transformation data for trans={1}. {0} has no '
'toplevel reference.'.format(self.name, trans))
coords[:] = np.dot(coords, trans)
return coords[:,:3]
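    # Usage sketch (illustrative; 'part' stands for a FirstLevelData instance
    # obtained from an opened SIF file):
    # coords = part.get_nodes(sets='DECK', kind='shell', disconnected=True)
    # coords_top = part.get_nodes(trans='top')  # transform into top level system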
def get_nodenumbers(self, sets=None, kind=None, disconnected=False,
numbertype='external'):
"""Get internal or external (default) node numbers.
"""
gnode = self._get_record('gnode')
if numbertype == 'external':
col = 'nodex'
elif numbertype == 'internal':
col = 'nodeno'
else:
raise ValueError("numbertype must be 'internal' or 'external'")
if any((sets, kind, disconnected)):
nodeindices = self._get_nodeindices(sets, kind, disconnected)
return gnode.col(col).take(nodeindices)
else:
return gnode.col(col)
def get_noderesults(self, restype, run=1, rescases=None,
sets=None, kind=None, disconnected=False):
"""Get node result data.
Parameters
----------
restype : str
'displacement', 'velocity' or 'acceleration'
run : int
Analysis run number (default is 1)
rescases : int, sequence or None
External result case number(s)
sets : str or sequence of str
set name or sequence of set names. If sets=None (default), all
            elements in the current dataset are returned.
kind : str
'beam', 'shell' or None (None is default, returns all kinds)
disconnected : bool
if True, elements do not share nodes. Instead each element has its
own set of nodes. This results in more nodes than in the original
data.
Returns
-------
result : numpy.ndarray
A 3d array with shape (nrescases, nnodes, 6), where the last axis
represent the six degrees of freedom for the result type.
"""
if restype == 'displacement':
rvnodres, res = self._get_record('rvnoddis')
start, stop = 'dis_start', 'dis_stop'
elif restype == 'velocity':
rvnodres, res = self._get_record('rvnodvel')
start, stop = 'vel_start', 'vel_stop'
elif restype == 'acceleration':
rvnodres, res = self._get_record('rvnodacc')
start, stop = 'acc_start', 'acc_stop'
else:
raise ValueError(
"restype must be 'displacement', 'velocity' or 'acceleration'")
resrefs = self._get_resrefs(run, rescases)
complexflag = self._check_complex_and_get_flag(resrefs['icompl'])
# get data
nodenumbs = set(
self.get_nodenumbers(sets, kind, numbertype='internal'))
ires = set(resrefs['ires'])
data = []
for r in rvnodres.iterrows():
if r['ires'] in ires and r['iinod'] in nodenumbs:
data.append(res[r[start]:r[stop]]) # lookup in res is slow..
data = np.concatenate(data)
# change dtype if complex
if complexflag:
data.dtype = np.complex64
# reshape
        ncomps = 6  # for now assume always 6 comps for node results..
nres = len(ires) # TODO: need an axis for rescase!
if nres > 1:
data.shape = (nres, -1, ncomps)
else:
data.shape = (-1, ncomps)
# disconnect
if disconnected:
indices = self._get_nodeindices(sets, kind, disconnected)
indices = self._update_connectivity(indices)
axis = 0 if nres == 1 else 1
return data.take(indices, axis=axis)
return data
# - get internal rescase number(s) from external and run no.
# e.g. self._get_ires(run, rescases)
# - check that results are either real or complex
# - get internal nodenumbers
# - iterate rvnodres:
# data = []
# if r[ires] in ires_set and r[iinod] in nodenumbers:
# data.append(res[r[start]:r[stop]])
# data = np.concatenate(data)
# - reshape according to number of components
# - if disconnected:
# indices = self._get_nodeindices(sets, kind, disconnected)
# data.take(indices, axis=0)
# TODO:
# sets, kind and disconnected are used everywhere, could it
# optionally be set global defaults for these ?
# fs.set_defaults(sets=[...], kind='shell', disconnected=True)
# Then at the beginning in the functions who have these parameters:
# sets, kind, disconnected = self._get_global_defaults(
# sets, kind, disconnected)
# if a value is passed to the function (other than the local default)
# this will override the global default and the argument will be
# returned unchanged. Same if a global default have not
# been set.
# result types:
# displacement
# velocity
# acceleration
# reactions ?
# nodal average of element results (coplanar elements only)
# rescase patterns:
# None (default) returns all
# rescase (int): choose a single resultcase
# sequence of rescases (ints): (1,4,5,6)
def get_elements(self, sets=None, kind=None, disconnected=False):
"""Get element connectivity.
Parameters
----------
sets : str or sequence of str
set name or sequence of set names. If sets=None (default), all
            elements in the current dataset are returned.
disconnected : bool
if True, elements do not share nodes. Instead each element has its
own set of nodes. This results in more nodes than in the original
data.
kind : str
'beam', 'shell' or None (None is default, returns all kinds)
Returns
-------
Returns three 1d arrays:
connectivity : numpy.ndarray
Element definitions are given sequentially as indices into a
            corresponding *coords* array, ref *get_nodes* method. The
*get_nodes* method must have been called with the same values for
*sets*, *disconnected*, and *kind*.
offset : numpy.ndarray
indices into *connectivity* representing the end of an element
definition. This array has length equal to the number of elements
returned.
eltyp : numpy.ndarray
element type id. corresponds to the *offset* array. See table below
for the supported element types.
Supported element types
-----------------------
==== ====== ============ ==============================================
Name Id No. of nodes Description
==== ====== ============ ==============================================
BEAS 15 2 3D 2 Node Beam
BTSS 23 3 General Curved Beam
FQUS 24 4 Flat Quadrilateral Thin Shell
FTRS 25 3 Flat Triangular Thin Shell
SCTS 26 6 Subparametric Curved Triangular Thick Shell
SCQS 28 8 Subparametric Curved Quadrilateral Thick Shell
==== ====== ============ ==============================================
"""
# TODO: should disconnected be True as default? If this turns out to be
# the most common use. Will this always be True when working with
# element results?
gelmnt1, nodin = self._get_record('gelmnt1')
if sets or kind:
elemindices = self._get_elementindices(sets, kind)
con, offset = self._get_connectivity(elemindices)
eltyp = gelmnt1.col('eltyp')[elemindices]
if disconnected:
con = np.arange(len(con))
else:
con = self._update_connectivity(con)
else:
if disconnected:
con = np.arange(len(nodin))
else:
con = nodin[:] - 1
offset = gelmnt1.col('nodin_stop')
eltyp = gelmnt1.col('eltyp')
return con, offset, eltyp
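    # Sketch: assuming *offset* holds exclusive end indices (as nodin_stop
    # suggests), the flat connectivity can be split into per-element node lists:
    # con, offset, eltyp = part.get_elements(kind='shell')  # 'part' as above
    # elements_as_lists = np.split(con, offset[:-1])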
def get_elementnumbers(self, sets=None, kind=None, numbertype='external'):
"""Get internal or external (default) element numbers.
"""
gelmnt1, nodin = self._get_record('gelmnt1')
if numbertype == 'external':
col = 'elnox'
elif numbertype == 'internal':
col = 'elno'
else:
raise ValueError("numbertype must be 'internal' or 'external'")
if sets or kind:
elemindices = self._get_elementindices(sets, kind)
return gelmnt1.col(col).take(elemindices)
else:
return gelmnt1.col(col)
def get_elementresults(self, restype, pos='nodes', run=1, rescases=None,
sets=None):
"""Get element result data.
Parameters
----------
restype : str
'beamforce', 'generalstress' or 'decomposedstress'
pos : str
'nodes' (default), 'respts' (not supported yet) or average
(not supported yet)
run : int
Analysis run number (default is 1)
rescases : int, sequence or None
External result case number(s)
sets : str or sequence of str
optionally limit the data by set(s)
Returns
-------
result : numpy.ndarray
The shape of the returned array will depend on the provided values
for the *restype* and *pos* parameters. See below for description
of the different types.
Result type: Beam Force
-----------------------
...
===== ===== ========== ==========================================
Pos. Elem. No. of No. of comps.
Type Res. pts.
===== ===== ========== ==========================================
nodes BEAS 2 6 (nxx, nxy, nxz, mxx, mxy, mxz)
nodes BTSS 3 6 (nxx, nxy, nxz, mxx, mxy, mxz)
===== ===== ========== ==========================================
Result type: General Stress
---------------------------
...
===== ===== ========== ==========================================
Pos. Elem. No. of No. of comps.
Type Res. pts.
===== ===== ========== ==========================================
nodes FQUS 4 3 (sigxx, sigyy, tauxy)
nodes FTRS 3 3 (sigxx, sigyy, tauxy)
nodes SCTS 6 5 (sigxx, sigyy, tauxy, tauxz, tauyz)
nodes SCQS 8 5 (sigxx, sigyy, tauxy, tauxz, tauyz)
===== ===== ========== ==========================================
"""
if restype == 'beamforce':
rvres, res = self._get_record('rvforces')
start, stop = 'force_start', 'force_stop'
kind = 'beam'
elif restype in ('generalstress', 'decomposedstress'):
rvres, res = self._get_record('rvstress')
start, stop = 'stress_start', 'stress_stop'
kind = 'shell'
else:
raise ValueError(
'restype={!r} not supported'.format(restype))
resrefs = self._get_resrefs(run, rescases)
complexflag = self._check_complex_and_get_flag(resrefs['icompl'])
# get data
elemnumbs = set(
self.get_elementnumbers(sets, kind, numbertype='internal'))
ires = set(resrefs['ires'])
data = []
for r in rvres.iterrows():
if r['ires'] in ires and r['iielno'] in elemnumbs:
eltyp = r['ir']
d = res[r[start]:r[stop]]
if complexflag:
d.dtype = np.complex64
data.append(
self._process_element_results(eltyp, restype, pos, d))
data = np.concatenate(data) # shape is (nres*npos, comp)
# or (nres*npos, surface, comp)
# reshape
if len(ires) > 1:
if restype == 'generalstress':
ncomps = data.shape[2]
data.shape = (len(ires), -1, 2, ncomps)
else:
ncomps = data.shape[1]
data.shape = (len(ires), -1, ncomps)
return data
# this function could also support 'kind':
# The purpose would be (for kind=None (all)) to have same shape datasets
# for both beam and shell results which then could be used with a single
# dataset for nodes and elements. irrelevant element types would be
# given zero or nan values.
# - get internal rescase number(s) from external and run no.
# e.g. self._get_ires(run, rescases)
# - check that results are either real or complex
# - get internal elementnumbers
# - get element types: eltyps = np.unique(rvstress.col('ir'))
# - iterate rvres:
# - Alt 1 separate elementtypes
# data = {}
# for eltyp in eltyps:
# data[eltyp] = []
# if r[ires] in ires_set and r[iinod] in nodenumbers:
# data[r[ir]].append(res[r[start]:r[stop]])
# for k,v in data.iteritems:
# data[k] = np.concatenate(v)
# - we now have data for each element type, which is needed to
# perform extrapolation etc..
# - but have we lost the original order??
# - data must be put back in original order berfore returning...
# - but reshaped first ?
# - Alt 2 perform manipulations inside loop
# data = []
# if r[ires] in ires_set and r[iinod] in nodenumbers:
#
# d = res[r[start]:r[stop]]
#
# perform processing:
# extrapolation etc..
#
# data.append(d)
# data = np.concatenate(data)
# change dtype if complex
# reshape data according to no. of components
# must ensure that all element types have the same number of
# components...
# Sequence of stress computations:
# 1. stresses at result points (as on resultfile)
# 2. surface stresses at result points (extrapolation for 2nd order)
# 3. element (node upper/lower) stresses (extrapolation to nodes)
# 4. element average (at upper/lower) by averaging elem corner vals
# 5. nodal average
# 6. decomposed stresses (bending/membrane)
# 7. Combinations (complex results must be evaluated/expanded)
# 8. principal and von mises stress calc
# kind:
# beam
# shell
# solid
# restypes:
# beamforce (beams)
# beamstress (beams)
# generalstress (shell and solid)
# decomposedstress (i.e. membrane/bending) (shell only)
# surfaceloads (shell and solid)
# pos:
# node (default)
# respt
# average
# layer: (consider returning both?)
# upper
# lower
# assume all elements have same (number of) components
# TODO
# provide functions to operate on result data from get_element results?
# freesif.calc.vonmises(arr, ...)
# freesif.calc.principal(arr, ...)
# freesif.calc.complex_eval(arr, phase)
# freesif.calc.complex_expand(arr, phase_step=10)
def get_concepts(self, sets=None):
pass
def get_conceptresults(self, sets=None):
pass
def get_conceptnames(self, sets=None):
pass
def get_section_properties(self, sets=None):
"""Get beam section properties
"""
gelref1, geono, fixno, eccno, transno = self._get_record('gelref1')
gbeamg = self._get_record('gbeamg')
gbeamg_arr = gbeamg[:]
elemindices = self._get_elementindices(sets, 'beam')
geono_bm = gelref1.col('geono').take(elemindices)
        gbeamg_indices = dict(zip(gbeamg.col('geono'), range(len(gbeamg))))
return np.array([gbeamg_arr[gbeamg_indices[gbm]] for gbm in geono_bm])
def get_shell_thicknesses(self, sets=None):
"""Get shell thicknesses
"""
gelref1, geono, fixno, eccno, transno = self._get_record('gelref1')
gelth = self._get_record('gelth')
gelth_arr = gelth.col('th')
elemindices = self._get_elementindices(sets, 'shell')
geono_sh = gelref1.col('geono').take(elemindices)
        gelth_indices = dict(zip(gelth.col('geono'), range(len(gelth))))
return np.array([gelth_arr[gelth_indices[gsh]] for gsh in geono_sh])
# to be memory efficient:
# iter_noderesults(), iter_elementresults(), iter_conceptresults() ??
def get_properties(self, prop_type, sets=None, concepts=False):
# choose per element or per concept
# one structured array with properties, and another array of ints
# corresponding to elements/concepts array pointing into the properties
# array
# only relevant for multi field properties like section, material etc?
pass
def get_resultcase_info(self, run=None, rescases=None):
pass
def get_result_component_names(self, restype):
pass
def get_resultcase_names(self, rescases=None):
"""Get loadcase names.
Parameters
----------
rescases : int, sequence or None
External result case number(s)
Returns
-------
result : dict
Result case numbers as keys, result case names as values.
"""
loadtable, _ = self._get_record('tdload')
idnos = loadtable.col('idno')
names = loadtable.col('name')
if rescases is None:
pass
else:
extract_elements = np.isin(idnos, rescases)
idnos = idnos[extract_elements]
names = names[extract_elements]
return dict(zip(idnos, names))
|
|
import os
import time
import sys
import requests
import random
from azure.mgmt.common import SubscriptionCloudCredentials
import azure.mgmt.compute
import azure.mgmt.network
import azure.mgmt.resource
import azure.mgmt.storage
class azure_api:
def __init__(self, subscripid, endpoint_uri, app_id, app_secret_key):
self.region = 'eastus'
# make sure you export these four env variables before you run this code
self.subscription_id=subscripid
self.AZURE_ENDPOINT_URL=endpoint_uri
self.AZURE_APP_ID=app_id
self.AZURE_APP_SECRET=app_secret_key
# create a token, you need to get create and assign role to an application before use this module
# check Readme.md in home directory
self.auth_token = self.get_token_from_client_credentials(endpoint=self.AZURE_ENDPOINT_URL,
client_id=self.AZURE_APP_ID,
client_secret=self.AZURE_APP_SECRET)
# create a cred using the token
self.creds = SubscriptionCloudCredentials(self.subscription_id, self.auth_token)
# now it is the time to manage the resource
self.compute_client = azure.mgmt.compute.ComputeManagementClient(self.creds)
self.network_client = azure.mgmt.network.NetworkResourceProviderClient(self.creds)
self.resource_client = azure.mgmt.resource.ResourceManagementClient(self.creds)
def get_token_from_client_credentials(self, endpoint, client_id, client_secret):
payload = {
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
'resource': 'https://management.core.windows.net/',
}
response = requests.post(endpoint, data=payload).json()
return response['access_token']
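    # Note: 'endpoint' is typically the tenant's OAuth2 token endpoint, e.g.
    # 'https://login.microsoftonline.com/<tenant-id>/oauth2/token' (assumed here;
    # check your Azure AD app registration).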
# create nic for your vm.
def create_network_interface(self, network_client, region, group_name, interface_name,
network_name, subnet_name, ip_name):
# Create or update a virtual network
# All the nic/ip addresses used by our virtual machines will be put into this virtual network
result = self.network_client.virtual_networks.create_or_update(
group_name,
network_name,
azure.mgmt.network.VirtualNetwork(
location=region,
address_space=azure.mgmt.network.AddressSpace(
address_prefixes=[
'10.1.0.0/16',
],
),
subnets=[
azure.mgmt.network.Subnet(
name=subnet_name,
address_prefix='10.1.0.0/24',
),
],
),
)
result = self.network_client.subnets.get(group_name, network_name, subnet_name)
subnet = result.subnet
# Create a new ip address resource
# In azure, we need to create this resource before we create a Network Interface
result = self.network_client.public_ip_addresses.create_or_update(
group_name,
ip_name,
azure.mgmt.network.PublicIpAddress(
location=region,
public_ip_allocation_method='Dynamic',
idle_timeout_in_minutes=4,
dns_settings=azure.mgmt.network.networkresourceprovider.PublicIpAddressDnsSettings(
domain_name_label=ip_name, # use ip name as its dns prefix
),
),
)
result = self.network_client.public_ip_addresses.get(group_name, ip_name)
public_ip_id = result.public_ip_address.id
# Create a network interface card using the ip address we just created
# this nic will be put into the virtual network we just create
result = self.network_client.network_interfaces.create_or_update(
group_name,
interface_name,
azure.mgmt.network.NetworkInterface(
#### updated
dns_settings=azure.mgmt.network.networkresourceprovider.DnsSettings(
dns_servers=[
'8.8.8.8'
]
),
#### updated
name=interface_name,
location=region,
ip_configurations=[
azure.mgmt.network.NetworkInterfaceIpConfiguration(
name='default',
private_ip_allocation_method=azure.mgmt.network.IpAllocationMethod.dynamic,
subnet=subnet,
public_ip_address=azure.mgmt.network.ResourceId(
id=public_ip_id,
),
),
],
),
)
result = self.network_client.network_interfaces.get(
group_name,
interface_name,
)
return result.network_interface.id
# Create a new vm using the source disk
# Return:
# IP address
# Parameters:
# group_name: Group name where the virtual machine is in (except the disk)
# storage_name: Storage account name where the source and target disk are in
# vm_name: VM name, need to be public unique
# region: Default: "eastus"
# machine_size: Machine Size
# image_uri: URI of the source disk
# disk_container_name: Container of the target disk (Create if not exist)
def create_vm_from_ami(self, group_name="demogroup", storage_name="snsn", vm_name="vmvm",
region="", machine_size=azure.mgmt.compute.VirtualMachineSizeTypes.standard_a0, image_uri="", disk_container_name="vhds"):
if region == "":
region = self.region
virtual_network_name=storage_name
subnet_name=storage_name
network_interface_name=vm_name
public_ip_name=vm_name
os_disk_name=vm_name
computer_name=vm_name
        # In our Images, the admin user and password are configured already. Changing the parameters here won't work
username="ubuntu"
password="Cloud@123"
# 1. Create a resource group
result = self.resource_client.resource_groups.create_or_update(
group_name,
azure.mgmt.resource.ResourceGroup(
location=region,
),
)
        print('created (updated) resource group')
# 2. Create the network interface using a helper function (defined below)
nic_id = self.create_network_interface(
self.network_client,
region,
group_name,
network_interface_name,
virtual_network_name,
subnet_name,
public_ip_name,
)
        print('created virtual network and network interface card')
        print('creating virtual machine')
# 3. Create the virtual machine
result = self.compute_client.virtual_machines.create_or_update(
group_name,
azure.mgmt.compute.VirtualMachine(
location=region,
name=vm_name,
os_profile=azure.mgmt.compute.OSProfile(
admin_username=username,
admin_password=password,
computer_name=computer_name,
),
hardware_profile=azure.mgmt.compute.HardwareProfile(
virtual_machine_size=machine_size
),
network_profile=azure.mgmt.compute.NetworkProfile(
network_interfaces=[
azure.mgmt.compute.NetworkInterfaceReference(
reference_uri=nic_id,
),
],
),
storage_profile=azure.mgmt.compute.StorageProfile(
os_disk=azure.mgmt.compute.OSDisk(
caching=azure.mgmt.compute.CachingTypes.none,
create_option=azure.mgmt.compute.DiskCreateOptionTypes.from_image,
name=os_disk_name,
virtual_hard_disk=azure.mgmt.compute.VirtualHardDisk(
uri='https://{0}.blob.core.windows.net/{1}/{2}.vhd'.format(
storage_name,
disk_container_name,
os_disk_name,
),
),
# AMI uri
source_image=azure.mgmt.compute.VirtualHardDisk(
uri=image_uri,
),
operating_system_type= azure.mgmt.compute.OperatingSystemTypes.linux
),
),
),
)
# Get the ip address
while True:
try:
result = self.network_client.public_ip_addresses.get(group_name, public_ip_name)
break
            except Exception:
                print('getting ip error, recall it now')
                time.sleep(5)  # brief pause before retrying the lookup
print('VM {0} available at IP: {1}'.format(vm_name, result.public_ip_address.ip_address))
return result.public_ip_address.ip_address
def help_book():
    print('usage: python azure_demo_create_vm_from_ami.py STORAGE_ACCOUNT_NAME SUBSCRIPTION_ID ENDPOINT_URI APPLICATION_ID APPLICATION_SECRET_KEY DISK_NAME MACHINE_SIZE')
def demo():
if 8 != len(sys.argv):
help_book()
return
    print('Your source os disk should be in storage account[{0}]. \n Your subscription id is {1} \n Your Endpoint uri is {2} \n Your application id is {3} \n your application secret key is {4} \n Please make sure you input the correct parameters for these!'.format(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]))
api = azure_api(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
time_stamp = int(time.time()) % 10000
rand_stamp = random.randint(1, 1000)
pse_uniqname = "cc"+str(time_stamp)+str(rand_stamp)
storage_account_name=sys.argv[1]
# image_uri = "https://" + storage_account_name + ".blob.core.windows.net/system/Microsoft.Compute/Images/vhds/primertest-osDisk.7ec2e680-5a2f-462b-ba77-cd7b707389d4.vhd"
image_uri = "https://" + storage_account_name + ".blob.core.windows.net/system/Microsoft.Compute/Images/vhds/" + sys.argv[6]
group_name_for_new_vm = pse_uniqname+"group" # specify your new group name
vm_name = pse_uniqname + "vm" # specify your vm name (should be public unique)
# api.create_vm_from_ami(storage_name=storage_account_name, vm_name=vm_name, group_name=group_name_for_new_vm, image_uri=image_uri, machine_size="Standard_A0")
api.create_vm_from_ami(storage_name=storage_account_name, vm_name=vm_name, group_name=group_name_for_new_vm, image_uri=image_uri, machine_size=sys.argv[7])
if __name__ == '__main__':
demo()
|
|
#
# No-U-Turn Sampler (NUTS) MCMC method
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import asyncio
import pints
import numpy as np
class NutsState(object):
"""
Class to hold information about the current state of the NUTS hamiltonian
integration path.
NUTS builds up the integration path implicitly via recursion up a binary
tree, this class handles combining states from different subtrees (see
`update`). The algorithm integrates both backwards ("minus") and forwards
("plus") in time, so this state must keep track of both end points of the
integration path.
Attributes
----------
theta_minus: ndarray
parameter value at the backwards end of the integration path
theta_plus: ndarray
parameter value at the forwards end of the integration path
r_minus: ndarray
momentum value at the backwards end of the integration path
r_plus: ndarray
momentum value at the forwards end of the integration path
L_minus: float
logpdf value at the backwards end of the integration path
L_plus: float
logpdf value at the forwards end of the integration path
grad_L_minus: float
gradient of logpdf at the backwards end of the integration path
grad_L_plus: float
gradient of logpdf at the forwards end of the integration path
n: int or float
the weight given to each subtree
s: int
0 if sufficient leapfrog steps have been taken, 1 otherwise
theta: ndarray
the current accepted point along the path
L: float
the logpdf of the current accepted point
grad_L: float
the gradient of the logpdf at the current accepted point
alpha: float
the acceptance probability
n_alpha: float
a count of the points along this path
divergent: boolean
True if one of the points in the tree was divergent
"""
def __init__(self, theta, r, L, grad_L, n, s, alpha, n_alpha, divergent,
inv_mass_matrix):
self.theta_minus = np.copy(theta)
self.theta_plus = np.copy(theta)
self.r_minus = np.copy(r)
self.r_plus = np.copy(r)
self.r_sum = np.copy(r)
self.L_minus = np.copy(L)
self.L_plus = np.copy(L)
self.grad_L_minus = np.copy(grad_L)
self.grad_L_plus = np.copy(grad_L)
self.n = n
self.s = s
self.theta = np.copy(theta)
self.L = L
self.grad_L = np.copy(grad_L)
self.alpha = alpha
self.n_alpha = n_alpha
self.divergent = divergent
self.inv_mass_matrix = inv_mass_matrix
def update(self, other_state, direction, root):
"""
if ``root == True``, this combines a depth j subtree (``self``) with a
depth j+1 (``other_state``) subtree, which corresponds to the higher
level loop in the nuts algorithm.
if ``root == False``, this combines two subtrees with depth j, which
occurs when the nuts algorithm is implicitly building up the tree with
the build_tree subroutine.
direction is the current direction of integration, either forwards
        (``direction == 1``), or backwards (``direction == -1``).
"""
# update the appropriate end of the tree according to what direction we
# are integrating
if direction == -1:
self.theta_minus = other_state.theta_minus
self.r_minus = other_state.r_minus
r_minus_plus = other_state.r_plus
r_plus_minus = self.r_minus
r_sum_minus = other_state.r_sum
r_sum_plus = self.r_sum
self.L_minus = other_state.L_minus
self.grad_L_minus = other_state.grad_L_minus
else:
self.theta_plus = other_state.theta_plus
self.r_plus = other_state.r_plus
r_minus_plus = self.r_plus
r_plus_minus = other_state.r_minus
r_sum_minus = self.r_sum
r_sum_plus = other_state.r_sum
self.L_plus = other_state.L_plus
self.grad_L_plus = other_state.grad_L_plus
        # alpha and n_alpha are accumulated for both root and non-root merges
        # (the two original branches were identical; this appears to match the
        # Stan code)
        self.alpha += other_state.alpha
        self.n_alpha += other_state.n_alpha
        # propagate divergence up the tree
self.divergent |= other_state.divergent
self.s *= other_state.s
# check if chain is stopping
if self.s == 0:
return
# for non-root merges accumulate tree weightings before probability
# calculation
if not root:
self.n = np.logaddexp(self.n, other_state.n)
# accept a new point based on the weighting of the two trees
p = min(1, np.exp(other_state.n - self.n))
if p > 0.0 and np.random.uniform() < p:
self.theta = other_state.theta
self.L = other_state.L
self.grad_L = other_state.grad_L
# for root merges accumulate tree weightings after probability
# calculation
if root:
self.n = np.logaddexp(self.n, other_state.n)
# integrate momentum over chain
self.r_sum += other_state.r_sum
# test if the path has done a U-Turn, if we are stopping due to a
        # U-turn or a divergent iteration, propagate this up the tree with
# self.s
if self.inv_mass_matrix.ndim == 1:
r_sharp_minus = self.inv_mass_matrix * self.r_minus
r_sharp_plus = self.inv_mass_matrix * self.r_plus
r_sharp_plus_minus = self.inv_mass_matrix * r_plus_minus
r_sharp_minus_plus = self.inv_mass_matrix * r_minus_plus
else:
r_sharp_minus = self.inv_mass_matrix.dot(self.r_minus)
r_sharp_plus = self.inv_mass_matrix.dot(self.r_plus)
r_sharp_plus_minus = self.inv_mass_matrix.dot(r_plus_minus)
r_sharp_minus_plus = self.inv_mass_matrix.dot(r_minus_plus)
# test merged trees
self.s *= int((self.r_sum).dot(r_sharp_minus) > 0)
self.s *= int((self.r_sum).dot(r_sharp_plus) > 0)
# test across subtrees
self.s *= int((r_sum_minus + r_plus_minus).dot(r_sharp_minus) > 0)
self.s *= int((r_sum_minus + r_plus_minus).dot(r_sharp_plus_minus) > 0)
self.s *= int((r_sum_plus + r_minus_plus).dot(r_sharp_minus_plus) > 0)
self.s *= int((r_sum_plus + r_minus_plus).dot(r_sharp_plus) > 0)
def kinetic_energy(r, inv_mass_matrix):
if inv_mass_matrix.ndim == 1:
return 0.5 * np.inner(r, inv_mass_matrix * r)
else:
return 0.5 * np.inner(r, inv_mass_matrix.dot(r))
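# Small consistency sketch (illustrative only, not used by the sampler): the
# diagonal and dense branches of kinetic_energy agree when the dense matrix is
# built from the same diagonal.
def _kinetic_energy_sketch():
    r = np.array([1.0, -2.0, 0.5])
    diag = np.array([2.0, 0.5, 1.0])
    return np.isclose(kinetic_energy(r, diag), kinetic_energy(r, np.diag(diag)))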
# All the functions below are written as coroutines to enable the recursive
# nuts algorithm to be written using the ask-and-tell interface used by PINTS,
# see main coroutine function ``nuts_sampler`` for more details
@asyncio.coroutine
def leapfrog(theta, L, grad_L, r, epsilon, inv_mass_matrix):
"""
performs a leapfrog step using a step_size ``epsilon`` and an inverse mass
matrix ``inv_mass_matrix``.
The inverse mass matrix can be a 2 dimensional ndarray, in which case it is
interpreted as a dense matrix, or a 1 dimensional ndarray, in which case it
is interpreted as a diagonal matrix.
"""
r_new = r + 0.5 * epsilon * grad_L
if inv_mass_matrix.ndim == 1:
theta_new = theta + epsilon * inv_mass_matrix * r_new
else:
theta_new = theta + epsilon * inv_mass_matrix.dot(r_new)
L_new, grad_L_new = (yield theta_new)
r_new += 0.5 * epsilon * grad_L_new
return L_new, grad_L_new, theta_new, r_new
@asyncio.coroutine
def build_tree(state, v, j, adaptor, hamiltonian0, hamiltonian_threshold):
"""
Implicitly build up a subtree of depth ``j`` for the NUTS sampler.
"""
if j == 0:
# Base case - take one leapfrog in the direction v
if v == -1:
theta = state.theta_minus
r = state.r_minus
L = state.L_minus
grad_L = state.grad_L_minus
else:
theta = state.theta_plus
r = state.r_plus
L = state.L_plus
grad_L = state.grad_L_plus
L_dash, grad_L_dash, theta_dash, r_dash = \
yield from leapfrog(theta, L, grad_L, r, v * adaptor.get_epsilon(),
adaptor.get_inv_mass_matrix())
hamiltonian_dash = L_dash \
- kinetic_energy(r_dash, adaptor.get_inv_mass_matrix())
if np.isnan(hamiltonian_dash):
comparison = float('-inf')
else:
comparison = hamiltonian_dash - hamiltonian0
n_dash = comparison
alpha_dash = min(1.0, np.exp(comparison))
divergent = -comparison > hamiltonian_threshold
s_dash = int(not divergent)
n_alpha_dash = 1
return NutsState(
theta_dash, r_dash, L_dash, grad_L_dash, n_dash, s_dash,
alpha_dash, n_alpha_dash, divergent,
adaptor.get_inv_mass_matrix()
)
else:
# Recursion - implicitly build the left and right subtrees
state_dash = yield from \
build_tree(state, v, j - 1, adaptor, hamiltonian0,
hamiltonian_threshold)
if state_dash.s == 1:
state_double_dash = yield from \
build_tree(state_dash, v, j - 1, adaptor, hamiltonian0,
hamiltonian_threshold)
state_dash.update(state_double_dash, direction=v, root=False)
return state_dash
@asyncio.coroutine
def find_reasonable_epsilon(theta, L, grad_L, inv_mass_matrix):
"""
Pick a reasonable value of epsilon close to when the acceptance
probability of the Langevin proposal crosses 0.5. This is based on
Algorithm 4 in [1]_ (with scaled mass matrix as per section 4.2).
Note: inv_mass_matrix can be a 1-d ndarray and in this case is interpreted
as a diagonal matrix, or can be given as a fully dense 2-d ndarray.
"""
    # initialise at epsilon = 1.0 (shouldn't matter where we start)
epsilon = 1.0
# randomly sample momentum
if inv_mass_matrix.ndim == 1:
r = np.random.normal(
np.zeros(len(theta)),
np.sqrt(1.0 / inv_mass_matrix)
)
else:
r = np.random.multivariate_normal(
np.zeros(len(theta)),
np.linalg.inv(inv_mass_matrix)
)
hamiltonian = L - kinetic_energy(r, inv_mass_matrix)
L_dash, grad_L_dash, theta_dash, r_dash = \
yield from leapfrog(theta, L, grad_L, r, epsilon, inv_mass_matrix)
hamiltonian_dash = L_dash - kinetic_energy(r_dash, inv_mass_matrix)
if np.isnan(hamiltonian_dash):
comparison = float('-inf')
else:
comparison = hamiltonian_dash - hamiltonian
# determine whether we are doubling or halving
alpha = 2 * int(comparison > np.log(0.5)) - 1
# double or half epsilon until acceptance probability crosses 0.5
while comparison * alpha > np.log(2) * (-alpha):
epsilon = 2**alpha * epsilon
L_dash, grad_L_dash, theta_dash, r_dash = \
yield from leapfrog(theta, L, grad_L, r, epsilon, inv_mass_matrix)
hamiltonian_dash = L_dash - kinetic_energy(r_dash, inv_mass_matrix)
if np.isnan(hamiltonian_dash): # pragma: no cover
comparison = float('-inf')
else:
comparison = hamiltonian_dash - hamiltonian
return epsilon
@asyncio.coroutine
def nuts_sampler(x0, delta, num_adaption_steps, sigma0,
hamiltonian_threshold, max_tree_depth,
use_dense_mass_matrix):
"""
The dual averaging NUTS mcmc sampler given in Algorithm 6 of [1]_.
Implements the multinomial sampling suggested in [2]_. Implements a mass
matrix for the dynamics, which is detailed in [2]_. Both the step size and
the mass matrix is adapted using a combination of the dual averaging
detailed in [1]_ and the windowed adaption for the mass matrix and step
size implemented in the Stan library (https://github.com/stan-dev/stan)
Implemented as a coroutine that continually generates new theta values to
evaluate (L, L') at. Users must send (L, L') back to the coroutine to
continue execution. The end of an mcmc step is signalled by generating a
tuple of values (theta, L, acceptance probability, number of leapfrog
steps)
Arguments
---------
x0: ndarray
starting point
delta: float
target acceptance probability (Dual Averaging scheme)
num_adaption_steps: int
number of adaption steps (Dual Averaging scheme)
hamiltonian_threshold: float
threshold to test divergent iterations
max_tree_depth: int
maximum tree depth
use_dense_mass_matrix: bool
if False, use a diagonal mass matrix, if True use a fully dense mass
matrix
References
----------
.. [1] Hoffman, M. D., & Gelman, A. (2014). The No-U-Turn sampler:
adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
.. [2] Betancourt, M. (2018). `A Conceptual Introduction to Hamiltonian
Monte Carlo`, https://arxiv.org/abs/1701.02434.
"""
# Initialise sampler with x0 and calculate logpdf
theta = x0
L, grad_L = (yield theta)
# Check first point is somewhere sensible
if not np.isfinite(L):
raise ValueError(
'Initial point for MCMC must have finite logpdf.')
    # pick the initial inverse mass matrix: a sigma0-based value is constructed
    # but then immediately overridden with a small constant matrix (1e-3 times
    # the identity, or a vector of 1e-3 values in the diagonal case), which is
    # what the adaption actually starts from
    if use_dense_mass_matrix:
        init_inv_mass_matrix = sigma0
        init_inv_mass_matrix = 1e-3 * np.eye(len(x0))
    else:
        init_inv_mass_matrix = np.diag(sigma0)
        init_inv_mass_matrix = 1e-3 * np.ones(len(x0))
# find a good value to start epsilon at (this will later be refined so that
# the acceptance probability matches delta)
epsilon = yield from find_reasonable_epsilon(theta, L, grad_L,
init_inv_mass_matrix)
# create adaption for epsilon and mass matrix
adaptor = pints.DualAveragingAdaption(
num_adaption_steps, delta, epsilon, init_inv_mass_matrix)
# start at iteration 1
m = 1
# provide an infinite generator of mcmc steps....
while True:
# randomly sample momentum
if use_dense_mass_matrix:
r0 = np.random.multivariate_normal(
np.zeros(len(theta)), adaptor.get_mass_matrix())
else:
r0 = np.random.normal(np.zeros(len(theta)),
np.sqrt(adaptor.get_mass_matrix()))
hamiltonian0 = L - kinetic_energy(r0, adaptor.get_inv_mass_matrix())
# create initial integration path state
state = NutsState(theta=theta, r=r0, L=L, grad_L=grad_L,
n=0.0, s=1, alpha=1, n_alpha=1, divergent=False,
inv_mass_matrix=adaptor.get_inv_mass_matrix())
j = 0
# build up an integration path with 2^j points, stopping when we either
# encounter a U-Turn, or reach a max number of points 2^max_tree_depth
while j < max_tree_depth and state.s == 1:
# pick a random direction to integrate in
# (to maintain detailed balance)
if np.random.randint(0, 2):
vj = 1
else:
vj = -1
            # recursively build up the tree in that direction
state_dash = yield from \
build_tree(state, vj, j, adaptor,
hamiltonian0, hamiltonian_threshold)
state.update(state_dash, direction=vj, root=True)
j += 1
# update current position in chain
theta = state.theta
L = state.L
grad_L = state.grad_L
# adapt epsilon and mass matrix using dual averaging
restart_stepsize_adapt = \
adaptor.step(state.theta, state.alpha / state.n_alpha)
if restart_stepsize_adapt:
epsilon = yield from \
find_reasonable_epsilon(theta, L, grad_L,
adaptor.get_inv_mass_matrix())
adaptor.init_adapt_epsilon(epsilon)
# signal calling process that mcmc step is complete by passing a tuple
# (rather than an ndarray)
yield (theta,
L,
grad_L,
state.alpha / state.n_alpha,
state.n_alpha,
state.divergent)
# next step
m += 1
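# Illustrative driver sketch (not part of the class below): step the nuts_sampler
# coroutine by hand, sending back (logpdf, gradient) for every position it yields.
# 'logpdf_and_grad' is a hypothetical callable returning that (L, grad_L) tuple.
def _drive_nuts_sketch(logpdf_and_grad, x0, n_samples=10):
    chain = nuts_sampler(x0, delta=0.8, num_adaption_steps=50,
                         sigma0=np.eye(len(x0)),
                         hamiltonian_threshold=1e3, max_tree_depth=10,
                         use_dense_mass_matrix=False)
    reply = next(chain)          # coroutine first asks for x0
    samples = []
    while len(samples) < n_samples:
        if isinstance(reply, tuple):
            # a completed mcmc step:
            # (theta, L, grad_L, mean acceptance, n_leapfrog, divergent)
            samples.append(reply[0])
            reply = next(chain)  # resume to get the next position to evaluate
        else:
            reply = chain.send(logpdf_and_grad(reply))
    return np.array(samples)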
class NoUTurnMCMC(pints.SingleChainMCMC):
r"""
Implements the No U-Turn Sampler (NUTS) with dual averaging, as described
in Algorithm 6 in [1]_.
Implements the multinomial sampling suggested in [2]_. Implements a mass
matrix for the dynamics, which is detailed in [2]_. Both the step size and
the mass matrix is adapted using a combination of the dual averaging
detailed in [1]_, and the windowed adaption for the mass matrix and step
size implemented in the Stan library (https://github.com/stan-dev/stan).
Like Hamiltonian Monte Carlo, NUTS imagines a particle moving over negative
log-posterior (NLP) space to generate proposals. Naturally, the particle
tends to move to locations of low NLP -- meaning high posterior density.
Unlike HMC, NUTS allows the number of steps taken through parameter space
to depend on position, allowing local adaptation.
Note: This sampler is only supported on Python versions 3.3 and newer.
Extends :class:`SingleChainMCMC`.
References
----------
.. [1] Hoffman, M. D., & Gelman, A. (2014). The No-U-Turn sampler:
adaptively setting path lengths in Hamiltonian Monte Carlo.
Journal of Machine Learning Research, 15(1), 1593-1623.
.. [2] Betancourt, M. (2018). `A Conceptual Introduction to Hamiltonian
Monte Carlo`, https://arxiv.org/abs/1701.02434.
"""
def __init__(self, x0, sigma0=None):
super(NoUTurnMCMC, self).__init__(x0, sigma0)
# hyperparameters
self._num_adaption_steps = 500
self._delta = 0.8
self._step_size = None
self._max_tree_depth = 10
self._use_dense_mass_matrix = False
# Default threshold for Hamiltonian divergences
# (currently set to match Stan)
self._hamiltonian_threshold = 10**3
# coroutine nuts sampler
self._nuts = None
# number of mcmc iterations
self._mcmc_iteration = 0
# Logging
self._last_log_write = 0
self._mcmc_acceptance = 0
self._n_leapfrog = 0
# current point in chain
self._current = self._x0
# next point to ask user to evaluate
self._next = self._current
# Set initial state
self._running = False
self._ready_for_tell = False
# Divergence checking
# Create a vector of divergent iterations
self._divergent = np.asarray([], dtype='int')
def ask(self):
""" See :meth:`SingleChainMCMC.ask()`. """
# Check ask/tell pattern
if self._ready_for_tell:
raise RuntimeError('Ask() called when expecting call to tell().')
# Initialise on first call
if not self._running:
self._nuts = nuts_sampler(self._x0, self._delta,
self._num_adaption_steps,
self._sigma0,
self._hamiltonian_threshold,
self._max_tree_depth,
self._use_dense_mass_matrix)
# coroutine will ask for self._x0
self._next = next(self._nuts)
self._running = True
self._ready_for_tell = True
return np.array(self._next, copy=True)
def delta(self):
"""
Returns delta, the target acceptance probability used to adapt the leapfrog step size.
"""
return self._delta
def divergent_iterations(self):
"""
Returns the iteration numbers of any divergent iterations.
"""
return self._divergent
def hamiltonian_threshold(self):
"""
Returns the threshold difference in Hamiltonian value from one iteration to
the next which determines whether an iteration is divergent.
"""
return self._hamiltonian_threshold
def _log_init(self, logger):
""" See :meth:`Loggable._log_init()`. """
logger.add_float('Accept.')
logger.add_counter('Steps.')
def _log_write(self, logger):
""" See :meth:`Loggable._log_write()`. """
# print nothing if no mcmc iterations since last log
if self._last_log_write == self._mcmc_iteration:
logger.log(None)
logger.log(None)
else:
logger.log(self._mcmc_acceptance)
logger.log(self._n_leapfrog)
self._mcmc_acceptance = 0
self._n_leapfrog = 0
self._last_log_write = self._mcmc_iteration
def max_tree_depth(self):
"""
Returns the maximum tree depth ``D`` for the algorithm. For each
iteration, the number of leapfrog steps will not be greater than
``2^D``.
"""
return self._max_tree_depth
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
return 1
def name(self):
""" See :meth:`pints.MCMCSampler.name()`. """
return 'No-U-Turn MCMC'
def needs_sensitivities(self):
""" See :meth:`pints.MCMCSampler.needs_sensitivities()`. """
return True
def number_adaption_steps(self):
"""
Returns number of adaption steps used in the NUTS algorithm.
"""
return self._num_adaption_steps
def set_delta(self, delta):
"""
Sets delta for the NUTS algorithm. This is the goal acceptance
probability for the algorithm, used to set the scalar magnitude of the
leapfrog step size.
"""
if self._running:
raise RuntimeError('cannot set delta while sampler is running')
if delta < 0 or delta > 1:
raise ValueError('delta must be in [0, 1]')
self._delta = delta
def set_hamiltonian_threshold(self, hamiltonian_threshold):
"""
Sets the threshold difference in Hamiltonian value from one iteration to
the next which determines whether an iteration is divergent.
"""
if hamiltonian_threshold < 0:
raise ValueError('Threshold for divergent iterations must be ' +
'non-negative.')
self._hamiltonian_threshold = hamiltonian_threshold
def set_hyper_parameters(self, x):
"""
The hyper-parameter vector is ``[number_adaption_steps]``.
See :meth:`TunableMethod.set_hyper_parameters()`.
"""
self.set_number_adaption_steps(x[0])
def set_max_tree_depth(self, max_tree_depth):
"""
Sets the maximum tree depth ``D`` for the algorithm. For each
iteration, the number of leapfrog steps will not be greater than
``2^D``.
"""
if max_tree_depth < 0:
raise ValueError('Maximum tree depth must be non-negative.')
self._max_tree_depth = max_tree_depth
def set_number_adaption_steps(self, n):
"""
Sets the number of adaption steps used in the NUTS algorithm. This is the
number of MCMC steps that are used to determine the best value for
epsilon, the scalar magnitude of the leapfrog step size.
"""
if self._running:
raise RuntimeError(
'cannot set number of adaption steps while sampler is running')
if n < 0:
raise ValueError('number of adaption steps must be non-negative')
self._num_adaption_steps = int(n)
def set_use_dense_mass_matrix(self, use_dense_mass_matrix):
"""
If ``use_dense_mass_matrix`` is False then algorithm uses a diagonal
matrix for the mass matrix. If True then a fully dense mass matrix is
used.
"""
self._use_dense_mass_matrix = bool(use_dense_mass_matrix)
def tell(self, reply):
""" See :meth:`pints.SingleChainMCMC.tell()`. """
if not self._ready_for_tell:
raise RuntimeError('Tell called before proposal was set.')
self._ready_for_tell = False
# send log likelihood and gradient to nuts coroutine,
# return value is the next theta to evaluate at
self._next = self._nuts.send(reply)
# coroutine signals end of current step by sending a tuple of
# information about the last mcmc step
if isinstance(self._next, tuple):
# extract next point in chain, its logpdf, the acceptance
# probability and the number of leapfrog steps taken during
# the last mcmc step
self._current = self._next[0]
current_logpdf = self._next[1]
current_gradient = self._next[2]
current_acceptance = self._next[3]
current_n_leapfrog = self._next[4]
divergent = self._next[5]
# Increase iteration count
self._mcmc_iteration += 1
# average quantities for logging
n_it_since_log = self._mcmc_iteration - self._last_log_write
self._mcmc_acceptance = (
(n_it_since_log * self._mcmc_acceptance + current_acceptance) /
(n_it_since_log + 1)
)
self._n_leapfrog = (
(n_it_since_log * self._n_leapfrog + current_n_leapfrog) /
(n_it_since_log + 1)
)
# store divergent iterations
if divergent:
self._divergent = np.append(
self._divergent, self._mcmc_iteration)
# request next point to evaluate
self._next = next(self._nuts)
# Return current position as next sample in the chain
return (
np.copy(self._current),
(current_logpdf, np.copy(current_gradient)),
True
)
else:
# Return None to indicate there is no new sample for the chain
return None
def use_dense_mass_matrix(self):
"""
Returns if the algorithm uses a dense (True) or diagonal (False) mass
matrix.
"""
return self._use_dense_mass_matrix
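# Illustrative ask/tell usage (a sketch, not part of the class above; it assumes
# `logpdf` is a pints.LogPDF whose evaluateS1() returns a (value, gradient)
# tuple, which this sampler requires since needs_sensitivities() is True):
#
#     sampler = NoUTurnMCMC(x0)
#     samples = []
#     while len(samples) < 1000:
#         x = sampler.ask()
#         fx, grad = logpdf.evaluateS1(x)
#         reply = sampler.tell((fx, grad))
#         if reply is not None:
#             sample, (log_pdf_value, gradient), accepted = reply
#             samples.append(sample)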
|
|
############################
from Bio import SeqIO
#from Bio.Seq import Seq
#from BCBio.GFF import GFFExaminer
#from BCBio import GFF
#from Bio.Alphabet import IUPAC
#import pprint
import csv
import re
#import sys
#import string
#import os
#from Bio.SeqRecord import SeqRecord
#from Bio.Alphabet import generic_dna
from Bio import SeqFeature
#from Bio.SeqFeature import FeatureLocation
#from Bio.SeqFeature import SeqFeature
#from Bio import Entrez
#from httplib import HTTPException
#import subprocess
#import csv
#import fasta
#import string
#import sys
#import re
#import random
#import os
#import math
##from sets import Set
#from scipy import stats
#from collections import Counter
#from Bio.Alphabet import IUPAC
#from Bio import Entrez
#from Bio.Seq import Seq
#from Bio import SeqFeature
#from Bio import SeqRecord
#from Bio.SeqFeature import FeatureLocation
#from Bio import SeqIO
#from httplib import HTTPException
# <codecell>
## This takes the file ordered_SNP_pos.csv. The csv coming out of SHM_Arabidopsis_Good_SNPs_to_csv_05-06-14.py is NOT ordered (by chrom, then pos); this file has been ordered by hand.
ordered_snp_list = [[],[],[],[],[]]
with open("ordered_SNP_pos.csv", "rU") as o_snp:
o_snp_reader = csv.reader(o_snp)
for r in o_snp_reader:
if r[0] == "Chr":
continue
ordered_snp_list[int(r[0])-1].append(r[1])
# <codecell>
gid_dict = {1:"240254421", 2:"240254678", 3:"240255695", 4:"240256243", 5:"240256493"} #GI ids for TAIR10
# <codecell>
#index_genbank_features adapted from http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/python/genbank/#indexing_features
#
##http://www.insdc.org/documents/feature_table.html
##
def index_genbank_features(gb_record, feature_type, qualifier) :
answer = dict()
for (index, feature) in enumerate(gb_record.features) :
if feature.type==feature_type :
if qualifier in feature.qualifiers :
for value in feature.qualifiers[qualifier] :
if value in answer :
answer[value].append(index)
else :
answer[value] = [index]
return answer
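## Illustrative example of the returned index (not run here): for a record whose
## third feature is a CDS with locus_tag "AT1G01010",
## index_genbank_features(gb_record, "CDS", "locus_tag") would contain
## {"AT1G01010": [2], ...}, i.e. qualifier value -> list of feature indices.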
cds_index = []
print "MAKE CDS FEATURE INDEX"
for c in gid_dict: # for each of the original chromosome sequences
print "Processing chrom", c
filename = "GI" + str(gid_dict[c]) + ".gbk"
print filename
for gb_record in SeqIO.parse(open(filename, "r"), "gb"): #based on http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/python/genbank/
loc_tag_cds_dict = index_genbank_features(gb_record, "CDS", "locus_tag") #based on http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/python/genbank/
#print "loc_tag_cds_dict", loc_tag_cds_dict
cds_index.append(loc_tag_cds_dict)
print "len(cds_index)", len(cds_index)
chr_counter = 1
for i in cds_index:
print len(i), "number of cds features on Chr", chr_counter
chr_counter +=1
## <codecell>
####There are a few CDS features in the GenBank files that produce errors when translated; catch them and exclude those CDS features from further analysis
print "LOOK FOR TRANSLATION ERRORS"
with open("Translation_errors_Col_Ref.csv", "wb") as error_file:
error_writer = csv.writer(error_file)
error_writer.writerow(["Chr", "GBfeature_index"])
for c in gid_dict:
count_temp = 0
print "Processing chrom", c
gb_file = "GI" + str(gid_dict[c]) + ".gbk"
error_counter = 0
for gb_record in SeqIO.parse(open(gb_file, "r"), "gb"):
print "len(cds_index[c-1])", len(cds_index[c-1])
for key in cds_index[c-1]: #a list of dictionaries, with one dictionary per chromosome, key = AT_id, value = index
val = cds_index[c-1][key]
for index in val:
count_temp +=1
if count_temp %1000 ==0:
print "count_temp", count_temp
#print index
gb_feature = gb_record.features[index]
straight_gb = gb_feature.extract(gb_record.seq)
try:
trans_gb = straight_gb.translate(cds=True) # OK to force cds=True because we're testing whether the GB feature annotated as a CDS can be translated without error
str(gb_feature.qualifiers['translation'][0]) == str(trans_gb)
except:
error_counter +=1
error_writer.writerow([c, index]) #write the chromosome and index to file
continue
print "Chromosome", c, ", number translation errors:", error_counter
# <codecell>
error_list = [[], [], [], [], []]
with open("Translation_errors_Col_Ref.csv", "rU") as errors:
error_reader = csv.reader(errors)
for r in error_reader:
if r[0] == "Chr":
continue
error_list[int(r[0])-1].append(r[1])
print "error list", error_list
chr_counter = 1
for e in error_list:
print len(e), "tranlation errors on Chr", chr_counter
chr_counter +=1
###### <codecell>
######Look up CDS features in the Cvi/Ler GenBank file, translate each CDS and ask whether it is the same as the "official" translation; if not, locate the position of the mutation
print "LOOK FOR NON-SYN SNPs"
def trans_cds_feats(c, gb_file):
count_non_sym = 0
for gb_record in SeqIO.parse(open(gb_file, "r"), "gb"):
print "Processing ", gb_file
find_file_prefix = re.search(".gbk", gb_file)
file_prefix = gb_file[:find_file_prefix.start()]
print file_prefix
seq_out_file = file_prefix + "_trans_results.csv"
with open(seq_out_file, "w") as out:
out_writer = csv.writer(out)
out_writer.writerow(["AT_id", "chr", "feat_start", "feat_end", "feat_strand", "protein_id", "product", "function", "translation", "gb_translation", "mutated_aa"])
print "Number of CDS features ", len(cds_index[int(c)-1]), " Col Genome"
new_geno_tag_cds_dict = index_genbank_features(gb_record, "CDS", "locus_tag") #based on http://www2.warwick.ac.uk/fac/sci/moac/people/students/peter_cock/python/genbank/
#print "loc_tag_cds_dict", loc_tag_cds_dict
#new_geno_cds = 0
#for f in gb_record.features:
# if f.type == "CDS":
# new_geno_cds += 1
#print "Number of CDS features ", new_geno_cds, " SNP Genome"
#assert len(cds_index[int(c)-1]) == new_geno_cds # make sure that there are the same number of features in the orginal (Col) Genbank record as there are in the new Cvi or Ler Genbank record
print "Number of CDS features ", len(new_geno_tag_cds_dict), " SNP Genome"
assert len(cds_index[int(c)-1]) == len(new_geno_tag_cds_dict) # make sure that there are the same number of features in the original (Col) Genbank record as there are in the new Cvi or Ler Genbank record
for key in cds_index[int(c)-1]: #a list of dictionaries, with one dictionary per chromosome, key = AT_id, value = [index]
val = cds_index[int(c)-1][key]
for index in val: #sometimes there is more than one record for AT id
if str(index) in error_list[int(c)-1]: # error_list holds string indices per chromosome (see how it is built above)
continue
gb_feature = gb_record.features[index]
straight_gb = gb_feature.extract(gb_record.seq)
startposition = gb_feature.location.start.position
endposition = gb_feature.location.end.position
strand = gb_feature.location.strand
#feat_position = (startposition, endposition, strand)
temp_snp_list = []#list of positions of SNPs in a feature
snp_count = 0
for k in ordered_snp_list[c-1]:#ordered list of SNP pos
if int(k) in gb_feature:
temp_snp_list.append(int(k))
snp_count +=1
mut_list = ""
trans_gb = straight_gb.translate()
find_stop = re.search("\*", str(trans_gb))
if find_stop == None:
mut_list = "NoStop"
out_writer.writerow([key, c, startposition, endposition, strand, str(gb_feature.qualifiers.get('protein_id', "")), str(gb_feature.qualifiers.get('product', "")), str(gb_feature.qualifiers.get('function', "")), str(trans_gb), str(gb_feature.qualifiers['translation'][0]), mut_list])
break # we could try to track down the next stop codon and send that sequence into function prediction - but it would take some doing - TODO?
trans_seq = trans_gb[:find_stop.start()] #the translated sequence without the "*", so that we can ask if the sequence itself differs from the Genbank translation
print trans_seq
print type(trans_seq)
if len(trans_seq) == 0:
break
if str(trans_seq)[0] !="M":
#print "NoStart"
mut_list = "NoStart"
out_writer.writerow([key, c, startposition, endposition, strand, str(gb_feature.qualifiers.get('protein_id', "")), str(gb_feature.qualifiers.get('product', "")), str(gb_feature.qualifiers.get('function', "")), str(trans_seq), str(gb_feature.qualifiers['translation'][0]), mut_list])
break #Potentially catastrophic SNP, any remaining SNPs in this feature are irrelevant
if len(str(trans_seq)) < len(str(gb_feature.qualifiers['translation'][0])):
#print "Pre-mature stop"
stop_at = len(str(trans_seq))-1
org_aa = str(gb_feature.qualifiers['translation'][0][stop_at])
mut_note = org_aa + str(stop_at) + "*" #Pre-mature stop codon
mut_list = mut_note
out_writer.writerow([key, c, startposition, endposition, strand, str(gb_feature.qualifiers.get('protein_id', "")), str(gb_feature.qualifiers.get('product', "")), str(gb_feature.qualifiers.get('function', "")), str(trans_seq), str(gb_feature.qualifiers['translation'][0]), mut_list])
break #Potentially catastrophic SNP, any remaining SNPs in this feature are irrelevant
if str(trans_seq) != str(gb_feature.qualifiers['translation'][0]):
for n in range(min(len(str(trans_seq)), len(str(gb_feature.qualifiers['translation'][0])))):
print "n", n
print "len(str(trans_seq))", len(str(trans_seq))
print "len(str(gb_feature.qualifiers['translation'][0])", len(str(gb_feature.qualifiers['translation'][0]))
if str(trans_seq[n]) != str(gb_feature.qualifiers['translation'][0][n]):
aa_pos = n+1
org_aa = str(gb_feature.qualifiers['translation'][0][n])
cvi_aa = str(trans_seq[n])
if aa_pos == 1: #if the first aa is not the same then the Cvi SNP interrupted the start codon
#print "NoStart"
mut_list = "NoStart"
out_writer.writerow([key, c, startposition, endposition, strand, str(gb_feature.qualifiers.get('protein_id', "")), str(gb_feature.qualifiers.get('product', "")), str(gb_feature.qualifiers.get('function', "")), str(trans_seq), str(gb_feature.qualifiers['translation'][0]), mut_list])
break #Potentially catostrophic SNP, any remaining SNPs in this featureare irrelevant
else:
count_non_sym +=1
mut_note = org_aa + str(aa_pos) + cvi_aa
#print "mut_note", mut_note
if len(mut_list) >1:
mut_list = mut_list + "," + mut_note
else:
mut_list = mut_note
out_writer.writerow([key, c, startposition, endposition, strand, str(gb_feature.qualifiers.get('protein_id', "")), str(gb_feature.qualifiers.get('product', "")), str(gb_feature.qualifiers.get('function', "")), str(trans_seq), str(gb_feature.qualifiers['translation'][0]), mut_list])
break #break if it finds one snp in record - only need to know that *a* snp is in feature because sequence is already modified for ALL SNPs and looping over pos in dict
print "Number of non-synonymous SNPs for " + file_prefix + ": ", count_non_sym
for c in gid_dict:
for prefix in ["Cvi_SNPs_Chr", "Ler_SNPs_Chr"]:
gb_file = prefix + str(c) + ".gbk"
trans_cds_feats(c, gb_file)
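## Notation used in the mutated_aa column (summary of the logic above): a value
## like "A123T" records that the reference amino acid A at that position became
## T, "A123*" records a premature stop, and "NoStart"/"NoStop" flag a disrupted
## start codon or a missing stop codon.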
|
|
#!/usr/bin/python
"""Utility functions for scripts in my bin diretory.
This module contains common utilities such as wrappers for
error/warning reporting, executing shell commands in a controlled way,
etc. These functions are shared by a number of helper scripts.
"""
import locale
import os
import re
import shlex
import signal
import subprocess
import sys
import tempfile
# Debugging verbosity level (0 -> no output)
flag_debug = 0
# Unit testing mode. If set to 1, throw exception instead of calling exit()
flag_unittest = 0
hrszre = re.compile(r"^([\d\.]+)(\S)$")
factors = {"K": 1024.0, "M": 1048576.0, "G": 1073741824.0}
def verbose(level, msg):
"""Print debug trace output of verbosity level is >= value in 'level'."""
if level <= flag_debug:
sys.stderr.write(msg + "\n")
def verbosity_level():
"""Return debug trace level."""
return flag_debug
def increment_verbosity():
"""Increment debug trace level by 1."""
global flag_debug
flag_debug += 1
def decrement_verbosity():
"""Lower debug trace level by 1."""
global flag_debug
flag_debug -= 1
def unit_test_enable():
"""Set unit testing mode."""
global flag_unittest
sys.stderr.write("+++ unit testing mode enabled +++\n")
flag_unittest = 1
def warning(msg):
"""Issue a warning to stderr."""
sys.stderr.write("warning: " + msg + "\n")
def error(msg):
"""Issue an error to stderr, then exit."""
errm = "error: " + msg + "\n"
sys.stderr.write(errm)
if flag_unittest:
raise Exception(errm)
else:
exit(1)
def docmd(cmd):
"""Run a command via subprocess, issuing fatal error if cmd fails."""
args = shlex.split(cmd)
verbose(2, "+ docmd executing: %s" % cmd)
rc = subprocess.call(args)
if rc != 0:
error("command failed: %s" % cmd)
# Similar to docmd, but return exit status instead of exiting on failure
def docmdnf(cmd):
"""Run a command via subprocess, returning exit status."""
args = shlex.split(cmd)
verbose(2, "+ docmd executing: %s" % cmd)
rc = subprocess.call(args)
return rc
# Similar to docmd, but suppress output
def doscmd(cmd, nf=None):
"""Run a command via subprocess, suppressing output unless error."""
verbose(2, "+ doscmd executing: %s" % cmd)
args = shlex.split(cmd)
cmdtf = tempfile.NamedTemporaryFile(mode="w", delete=True)
rc = subprocess.call(args, stdout=cmdtf, stderr=cmdtf)
if rc != 0:
warning("error: command failed (rc=%d) cmd: %s" % (rc, cmd))
warning("output from failing command:")
subprocess.call(["cat", cmdtf.name])
if nf:
return None
error("")
cmdtf.close()
return True
# invoke command, writing output to file
def docmdout(cmd, outfile, nf=None):
"""Run a command via subprocess, writing output to a file."""
verbose(2, "+ docmdout executing: %s > %s" % (cmd, outfile))
args = shlex.split(cmd)
with open(outfile, "w") as outfile:
rc = subprocess.call(args, stdout=outfile)
if rc != 0:
warning("error: command failed (rc=%d) cmd: %s" % (rc, cmd))
if nf:
return None
error("")
return True
# invoke command, writing stdout and stderr to file
def docmderrout(cmd, outfile, nf=None):
"""Run a command via subprocess, writing output to a file."""
verbose(2, "+ docmdout executing: %s > %s" % (cmd, outfile))
args = shlex.split(cmd)
try:
with open(outfile, "w") as outfile:
rc = subprocess.call(args, stdout=outfile, stderr=outfile)
if rc != 0:
if nf:
sys.stderr.write("error: command failed (rc=%d) cmd: %s\n" % (rc, cmd))
return rc
else:
error("command failed (rc=%d) cmd: %s\n" % (rc, cmd))
return rc
except IOError:
error("unable to open %s for writing" % outfile)
# invoke command, reading from one file and writing to another
def docmdinout(cmd, infile, outfile):
"""Run a command via subprocess with input and output file."""
verbose(2, "+ docmdinout executing: %s < %s > %s" % (cmd, infile, outfile))
args = shlex.split(cmd)
cmdtf = tempfile.NamedTemporaryFile(mode="w", delete=True)
with open(infile, "r") as inf:
with open(outfile, "w") as outf:
rc = subprocess.call(args, stdout=outf, stdin=inf, stderr=cmdtf)
if rc != 0:
warning("error: command failed (rc=%d) cmd: %s" % (rc, cmd))
warning("output from failing command:")
subprocess.call(["cat", cmdtf.name])
return 1
verbose(2, "+ finished: %s < %s > %s" % (cmd, infile, outfile))
return 0
# invoke command, returning array of lines read from it
def docmdlines(cmd, nf=None):
"""Run a command via subprocess, returning output as an array of lines."""
verbose(2, "+ docmdlines executing: %s" % cmd)
args = shlex.split(cmd)
mypipe = subprocess.Popen(args, stdout=subprocess.PIPE)
encoding = locale.getdefaultlocale()[1]
pout, perr = mypipe.communicate()
if mypipe.returncode != 0:
if perr:
decoded_err = perr.decode(encoding)
warning(decoded_err)
if nf:
return None
error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, args))
decoded = pout.decode(encoding)
lines = decoded.strip().split("\n")
return lines
# invoke command, returning raw bytes from read
def docmdbytes(cmd, nf=None):
"""Run a command via subprocess, returning output as raw bytestring."""
args = shlex.split(cmd)
mypipe = subprocess.Popen(args, stdout=subprocess.PIPE)
pout, perr = mypipe.communicate()
if mypipe.returncode != 0:
encoding = locale.getdefaultlocale()[1]
if perr:
decoded_err = perr.decode(encoding)
warning(decoded_err)
if nf:
return None
error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, args))
return pout
# invoke a command with input coming from an echo'd string, e.g.
# Ex: "echo 1+2 | perl"
def docmdinstring(cmd, instring):
"""Invoke a command with stdin coming from a specific string."""
verbose(2, "+ docmdinstring executing: echo %s | %s " % (cmd, instring))
args = shlex.split(cmd)
mypipe = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
encoding = locale.getdefaultlocale()[1]
pout, perr = mypipe.communicate(instring.encode(encoding)) # pipe is in bytes mode, so encode the input string
if mypipe.returncode != 0:
if perr:
decoded_err = perr.decode(encoding)
warning(decoded_err)
error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, args))
decoded = pout.decode(encoding)
lines = decoded.strip().split("\n")
return lines
# Execute a command with an alarm timeout.
def docmdwithtimeout(cmd, timeout_duration):
"""Run a command via subprocess, returning exit status or -1 if timeout."""
class TimeoutError(Exception):
pass
def handler(signum, frame):
raise TimeoutError()
# set the timeout handler
prevhandler = signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout_duration)
try:
result = docmdnf(cmd)
except TimeoutError as exc:
verbose(1, "timeout triggered after %d seconds" % timeout_duration)
result = -1
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, prevhandler)
return result
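# Example (illustrative): run a command but give up after 5 seconds; a return
# value of -1 indicates the alarm fired before the command finished.
#
#     rc = docmdwithtimeout("sleep 30", 5)
#     if rc == -1:
#         warning("command timed out")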
# perform default locale setup if needed
def setdeflanglocale():
if "LANG" not in os.environ:
warning("no env setting for LANG -- using default values")
os.environ["LANG"] = "en_US.UTF-8"
os.environ["LANGUAGE"] = "en_US:"
def determine_btrfs_ssdroot(here):
"""Determine ssd root."""
path_components = here.split("/")
root = "/%s" % path_components[1]
verbose(2, "cwd=%s root=%s" % (here, root))
# Is this a BTRFS ssd to begin with?
outlines = docmdlines("stat -f --printf=%%T %s" % root)
if not outlines:
error("internal error-- could not determine FS type "
"for root dir %s" % root)
if outlines[0] != "btrfs":
error("current FS type is %s, not btrfs (can't proceed)" % outlines[0])
return root
def hr_size_to_bytes(sz):
"""Convert human readable size back to bytes."""
m = hrszre.match(sz)
if not m:
warning("unmatchable size expr %s" % sz)
return None
val = float(m.group(1))
facs = m.group(2)
if facs not in factors:
warning("unknown factor '%s' in size expr %s" % (facs, sz))
return None
fac = factors[facs]
nb = int(val * fac)
return nb
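# Examples (illustrative): hr_size_to_bytes("1.5K") returns 1536 and
# hr_size_to_bytes("2G") returns 2147483648; a string without a recognised
# K/M/G suffix triggers a warning and returns None.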
def trim_perf_report_file(infile):
"""Trim trailing spaces from lines in perf report."""
verbose(1, "trim: reading " + infile)
try:
f = open(infile, "r")
except IOError:
warning("unable to open file %s for reading" % infile)
return 1
lines = f.readlines()
f.close()
verbose(1, "trim: rewriting " + infile)
try:
ft = open(infile, "w")
except IOError:
warning("unable to open file %s for rewriting" % infile)
return 1
for line in lines:
sline = line.rstrip()
ft.write(sline + "\n")
ft.close()
return 0
|
|
'''
Greymind Sequencer for Maya
Version: 1.8.0
(c) 2009 - 2015 Greymind Inc.
Balakrishnan (Balki) Ranganathan (balki_live_com)
All Rights Reserved.
'''
from Common import * # assumed to provide Cmds, Mel and the other helpers used below
from functools import partial as Partial
SequencerVersion = "1.8.0"
class Animation:
Id = -1
Name = ""
StartFrame = -1
EndFrame = -1
Selected = False
def __init__(self, name, startFrame, endFrame, selected=False, id=-1):
self.Name = name
self.StartFrame = startFrame
self.EndFrame = endFrame
self.Selected = selected
self.Id = id
class AnimationUI:
NameTextField = None
StartFrameTextField = None
EndFrameTextField = None
SelectedCheckBox = None
SetButton = None
Animation = None
SequencerUI = None
def __init__(self, sequencerUI, animation):
self.Create(sequencerUI, animation)
def Name_Changed(self, extraArg=None):
self.Animation.Name = Cmds.textField(self.NameTextField, q=True, text=True)
self.SequencerUI.Save()
self.SequencerUI.Update()
def StartFrame_Changed(self, extraArg=None):
self.Animation.StartFrame = int(Cmds.textField(self.StartFrameTextField, q=True, text=True))
self.SequencerUI.Save()
self.SequencerUI.Update()
def EndFrame_Changed(self, extraArg=None):
self.Animation.EndFrame = int(Cmds.textField(self.EndFrameTextField, q=True, text=True))
self.SequencerUI.Save()
self.SequencerUI.Update()
def Select_Changed(self, extraArg=None):
self.Animation.Selected = Cmds.checkBox(self.SelectedCheckBox, q=True, value=True)
self.SequencerUI.Save()
self.SequencerUI.Update()
def Set_Clicked(self, extraArg=None):
self.SequencerUI.Save()
self.SequencerUI.Update()
Cmds.playbackOptions(e=True, min=self.Animation.StartFrame, max=self.Animation.EndFrame)
def Create(self, sequencerUI, animation):
self.SequencerUI = sequencerUI
self.Animation = animation
self.SelectedCheckBox = Cmds.checkBox(label='', value=animation.Selected, cc=self.Select_Changed)
self.NameTextField = Cmds.textField(text=animation.Name, width=183, cc=self.Name_Changed)
self.StartFrameTextField = Cmds.textField(text=str(animation.StartFrame), cc=self.StartFrame_Changed)
self.EndFrameTextField = Cmds.textField(text=str(animation.EndFrame), cc=self.EndFrame_Changed)
self.SetButton = Cmds.button(label='Set', c=self.Set_Clicked)
def Destroy(self):
Cmds.deleteUI(self.SelectedCheckBox)
Cmds.deleteUI(self.NameTextField)
Cmds.deleteUI(self.StartFrameTextField)
Cmds.deleteUI(self.EndFrameTextField)
class Sequencer:
UniqueId = 0 # Stores the top unique id
Animations = {} # Stores the animations keyed by a unique id
Ordering = [] # Stores the order in which the animations are to be displayed by the UI
def GetUniqueId(self):
self.UniqueId = self.UniqueId + 1
return (self.UniqueId - 1)
def Count(self):
if not len(self.Animations) == len(self.Ordering):
print "Warning: Animations dictionary and Ordering list do not have same number of elements"
return len(self.Animations)
def StartFrame(self):
startFrame = int(1e6)
for animationId in self.Animations:
if self.Animations[animationId].StartFrame < startFrame:
startFrame = self.Animations[animationId].StartFrame
if startFrame == int(1e6):
startFrame = 0
return startFrame
def EndFrame(self):
endFrame = 0
for animationId in self.Animations:
if self.Animations[animationId].EndFrame > endFrame:
endFrame = self.Animations[animationId].EndFrame
return endFrame
def AddAnimationWithId(self, animation):
self.Animations[animation.Id] = animation
self.Ordering.append(animation.Id)
def AddAnimation(self, animation):
# Check if it exists perhaps?
animation.Id = self.GetUniqueId()
self.AddAnimationWithId(animation)
def RemoveAnimation(self, animationId):
del self.Animations[animationId]
self.Ordering.remove(animationId)
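# Illustrative use of the data model above (a sketch, independent of the Maya UI):
#
#     seq = Sequencer()
#     seq.AddAnimation(Animation("Idle", 1, 30))
#     seq.AddAnimation(Animation("Run", 31, 60))
#     seq.Count()       # -> 2
#     seq.StartFrame()  # -> 1
#     seq.EndFrame()    # -> 60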
class SequencerUI:
progressBar = ""
windowName = ""
windowLayout = ""
animationsLayout = ""
buttonsLayout = ""
startFrameTextBox = ""
endFrameTextBox = ""
selectAll = ""
windowTitle = ""
width = 0
height = 0
sequencer = None
IncludePlayblastLinkCheckBox = None
AnimationUIs = {}
def __init__(self):
self.progressBar = Mel.eval('$tmp = $gMainProgressBar');
self.windowName = "SequencerWindow"
self.windowLayout = "SequencerLayout"
self.animationsLayout = "SequencerAnimations"
self.buttonsLayout = "SequencerButtons"
self.selectAll = "SequencerSelectAll"
self.startFrameTextBox = "SequencerStartFrameTextBox"
self.endFrameTextBox = "SequencerEndFrameTextBox"
self.prefixTextBox = "SequencerPrefixTextBox"
self.windowTitle = "Sequencer " + SequencerVersion
self.width = 400
self.height = 550
self.Load()
def AttributeName(self, baseNode, attributeName):
return "%s.%s" % (baseNode, attributeName)
def Load(self):
'''
Populate sequencer class from maya nodes
'''
Attr = self.AttributeName
self.sequencer = Sequencer()
'''
self.sequencer.AddAnimation(Animation("Idle", 1, 30))
self.sequencer.AddAnimation(Animation("Run", 61, 90))
self.sequencer.AddAnimation(Animation("Walk", 31, 60))
return
'''
sequencerNode = "SequencerData"
if not Cmds.objExists(sequencerNode):
return
self.sequencer.UniqueId = Cmds.getAttr(Attr(sequencerNode, 'UniqueId'))
ordering = Cmds.getAttr(Attr(sequencerNode, 'Ordering'))
if ordering is None or len(ordering) == 0:
return
try:
for orderId in range(len(ordering)):
animationId = ordering[orderId]
parentAttribute = Attr(sequencerNode, 'Animation%d' % animationId)
name = Cmds.getAttr(Attr(parentAttribute, "Name%d" % animationId))
startFrame = Cmds.getAttr(Attr(parentAttribute, "StartFrame%d" % animationId))
endFrame = Cmds.getAttr(Attr(parentAttribute, "EndFrame%d" % animationId))
selected = Cmds.getAttr(Attr(parentAttribute, "Selected%d" % animationId))
animation = Animation(name, startFrame, endFrame, selected, animationId)
self.sequencer.AddAnimationWithId(animation)
except TypeError:
print "There seems to be an error loading sequencer data, reseting Sequencer"
Cmds.delete(sequencerNode)
def Save(self):
'''
Save sequencer data back into maya nodes
'''
Attr = self.AttributeName
selection = Cmds.ls(selection=True)
sequencerNode = "SequencerData"
if Cmds.objExists(sequencerNode):
Cmds.delete(sequencerNode)
Cmds.createNode('script', name=sequencerNode)
Cmds.addAttr(sequencerNode, longName='UniqueId', attributeType='long', storable=True)
Cmds.setAttr(Attr(sequencerNode, 'UniqueId'), self.sequencer.UniqueId)
Cmds.addAttr(sequencerNode, longName='Ordering', dataType='Int32Array', storable=True)
Cmds.setAttr(Attr(sequencerNode, 'Ordering'), self.sequencer.Ordering, type='Int32Array')
if self.sequencer.Count() == 0:
return
Cmds.addAttr(sequencerNode, longName='Animations', attributeType='compound', numberOfChildren=self.sequencer.Count())
for animation in self.sequencer.Animations.values():
parentAttribute = Attr(sequencerNode, 'Animation%d' % animation.Id)
Cmds.addAttr(longName="Animation%d" % animation.Id, attributeType='compound', numberOfChildren=4, parent='Animations')
Cmds.addAttr(longName="Name%d" % animation.Id, dataType='string', parent='Animation%d' % animation.Id)
Cmds.addAttr(longName="StartFrame%d" % animation.Id, attributeType='long', parent='Animation%d' % animation.Id)
Cmds.addAttr(longName="EndFrame%d" % animation.Id, attributeType='long', parent='Animation%d' % animation.Id)
Cmds.addAttr(longName="Selected%d" % animation.Id, attributeType='bool', parent='Animation%d' % animation.Id)
for animation in self.sequencer.Animations.values():
parentAttribute = Attr(sequencerNode, 'Animation%d' % animation.Id)
Cmds.setAttr(Attr(parentAttribute, "Name%d" % animation.Id), animation.Name, type='string')
Cmds.setAttr(Attr(parentAttribute, "StartFrame%d" % animation.Id), animation.StartFrame)
Cmds.setAttr(Attr(parentAttribute, "EndFrame%d" % animation.Id), animation.EndFrame)
Cmds.setAttr(Attr(parentAttribute, "Selected%d" % animation.Id), animation.Selected)
#Cmds.select(cl=True)
if len(selection) > 0:
Cmds.select(selection)
def CreateAnimationEntry(self, animation):
'''
Create the UI entry for the animation
'''
Cmds.setParent(self.animationsLayout)
Cmds.rowLayout(numberOfColumns = 5, columnWidth5 = [35, 185, 50, 50, 35], columnAttach=[1, 'left', 8])
self.AnimationUIs[animation.Id] = AnimationUI(self, animation)
def Refresh(self, extraArg=None):
'''
Refreshes the animations UI
'''
# Process select all checkbox value
selected = False
if Cmds.checkBox(self.selectAll, q=True, exists=True):
selected = Cmds.checkBox(self.selectAll, q=True, value=True)
if selected == True:
for animation in self.sequencer.Animations.values():
if animation.Selected == False:
selected = False
break
# Clear all UI items
for animationId in self.AnimationUIs:
self.AnimationUIs[animationId].Destroy()
self.AnimationUIs = {}
# Animations Layout
Cmds.setParent(self.windowLayout)
if Cmds.columnLayout(self.animationsLayout, exists=True):
Cmds.deleteUI(self.animationsLayout)
Cmds.columnLayout(self.animationsLayout, adjustableColumn=True)
self.CreateSeparator(self.animationsLayout, 'out')
# Header row
Cmds.rowLayout(numberOfColumns = 4, columnWidth4 = [35, 185, 50, 50], columnAttach=[1, 'left', 8])
Cmds.checkBox(self.selectAll, label='', cc=Partial(self.SelectAll), value=selected)
Cmds.text(label=' Animation Name')
Cmds.text(label=' Start')
Cmds.text(label=' End')
self.CreateSeparator(self.animationsLayout, 'out')
# Add back based on order
#print self.sequencer.Ordering
#return
for orderId in range(len(self.sequencer.Ordering)):
animationId = self.sequencer.Ordering[orderId]
self.CreateAnimationEntry(self.sequencer.Animations[animationId])
# Update
self.Update()
# Save
self.Save()
def Update(self):
# Skyrigger tools
Cmds.textField(self.startFrameTextBox, e=True, text=str(self.sequencer.StartFrame()))
Cmds.textField(self.endFrameTextBox, e=True, text=str(self.sequencer.EndFrame()))
def MoveUp(self, extraArg=None):
'''
Moves a given entry (or a collection of entries) up
'''
for index in range(self.sequencer.Count()):
animation = self.sequencer.Animations[self.sequencer.Ordering[index]]
if animation.Selected:
if not index == 0:
temp = self.sequencer.Ordering[index - 1]
self.sequencer.Ordering[index - 1] = animation.Id
self.sequencer.Ordering[index] = temp
self.Refresh()
def MoveDown(self, extraArg=None):
'''
Moves a given entry (or a collection of entries) down
'''
for index in reversed(range(self.sequencer.Count())):
animation = self.sequencer.Animations[self.sequencer.Ordering[index]]
if animation.Selected:
if not index == self.sequencer.Count() - 1:
temp = self.sequencer.Ordering[index + 1]
self.sequencer.Ordering[index + 1] = animation.Id
self.sequencer.Ordering[index] = temp
self.Refresh()
def SelectAll(self, extraArg=None):
selected = Cmds.checkBox(self.selectAll, q=True, value=True)
for animation in self.sequencer.Animations.values():
animation.Selected = selected
self.Refresh()
def Add(self, extraArg=None):
self.sequencer.AddAnimation(Animation("", self.sequencer.EndFrame() + 1, self.sequencer.EndFrame() + 30))
self.Refresh()
def Delete(self, extraArg=None):
animationsToRemove = []
for animationId in self.sequencer.Animations.keys():
if self.sequencer.Animations[animationId].Selected == True:
animationsToRemove.append(animationId)
for animationId in animationsToRemove:
self.sequencer.RemoveAnimation(animationId)
self.Refresh()
def SetPlaybackRange(self, startTime, endTime):
Cmds.playbackOptions(e=True, min=startTime, max=endTime)
def MessageBox(self, dialogMessage, dialogTitle = "Sequencer", dialogButtons=["Ok"]):
Cmds.confirmDialog(title=dialogTitle, message=dialogMessage, button=dialogButtons)
def InputBox(self, dialogMessage, defaultText="", dialogTitle = "Sequencer", dialogButtons=["Ok", "Cancel"]):
result = Cmds.promptDialog(title=dialogTitle, message=dialogMessage, text=defaultText, button=dialogButtons, defaultButton="Ok", cancelButton="Cancel", dismissString="Cancel")
if result == "Ok":
return Cmds.promptDialog(query=True, text=True)
return ""
def CountSelected(self):
selectedCount = 0
for animation in self.sequencer.Animations.values():
if animation.Selected == True:
selectedCount = selectedCount + 1
return selectedCount
def StartProgressBar(self, statusMessage, maximumValue):
Cmds.progressBar(self.progressBar, edit=True, beginProgress=True, isInterruptable=True,
status=statusMessage, maxValue=maximumValue)
def IsProgressBarCanceled(self):
if Cmds.progressBar(self.progressBar, query=True, isCancelled=True):
return True
else:
return False
def UpdateProgressBar(self, progress):
Cmds.progressBar(self.progressBar, edit=True, pr=progress)
def StepProgressBar(self, stepAmount):
Cmds.progressBar(self.progressBar, edit=True, step=stepAmount)
def EndProgressBar(self):
Cmds.progressBar(self.progressBar, edit=True, endProgress=True)
def ImportMoveLister(self, extraArg=None):
if Cmds.objExists("MoveLister"):
totalMoves = Cmds.getAttr("MoveLister.totalMoves")
for i in range(0, totalMoves):
nodePrefix = "MoveLister.move" + str(i)
moveNameNode = nodePrefix + "Name"
moveName = Cmds.getAttr(moveNameNode)
moveMinNode = nodePrefix + "Min"
moveMin = Cmds.getAttr(moveMinNode)
moveMaxNode = nodePrefix + "Max"
moveMax = Cmds.getAttr(moveMaxNode)
self.sequencer.AddAnimation(Animation(moveName, moveMin, moveMax))
self.Refresh()
else:
self.MessageBox('MoveLister data not found!')
def PlayblastDisplayEnable(self, enable=True):
Cmds.setAttr('persp.displayFilmOrigin', enable)
Cmds.setAttr('persp.displayFilmPivot', enable)
Cmds.setAttr('persp.displaySafeTitle', enable)
Cmds.setAttr('persp.displaySafeAction', enable)
Cmds.setAttr('persp.displayFieldChart', enable)
Cmds.setAttr('persp.displayResolution', enable)
Cmds.setAttr('persp.displayFilmGate', enable)
def Export(self, extraArg=None):
directoryName = os.path.dirname(Cmds.file(q=True, sn=True))
if not directoryName:
self.MessageBox('Please save Maya file before exporting.')
return
selectedCount = self.CountSelected()
if selectedCount == 0:
self.MessageBox('Please select animations to export!')
return
includePlayblastLink = Cmds.checkBox(self.IncludePlayblastLinkCheckBox, q=True, value=True)
playblastFolder = ""
if includePlayblastLink:
playblastFolder = self.InputBox("Enter directory that contains the playblasts", directoryName)
if not playblastFolder:
self.MessageBox("Please enter a playblast folder. Export canceled.")
return
playblastPrefix = self.InputBox("Enter prefix (if any)")
now = datetime.datetime.now()
exportFilename = "%s/Export %d%d%d-%d%d%d.csv" % (directoryName, now.year, now.month, now.day, now.hour, now.minute, now.second)
exportFile = open(exportFilename, "w")
if not includePlayblastLink:
exportFile.write("%s,%s,%s\n" % ('Animation Name', 'Start Frame', 'End Frame'))
else:
exportFile.write("%s,%s,%s,%s\n" % ('Animation Name', 'Start Frame', 'End Frame', 'Playblast'))
for animation in self.sequencer.Animations.values():
if animation.Selected == True:
if not includePlayblastLink:
exportFile.write("%s,%d,%d\n" % (animation.Name, animation.StartFrame, animation.EndFrame))
else:
playblastLink = "%s.avi" % self.GetPlayblastMovieFilename(playblastFolder, playblastPrefix, animation)
if os.path.isfile(playblastLink):
exportFile.write("%s,%d,%d,\"=HYPERLINK(\"\"%s\"\", \"\"[open]\"\")\"\n" % (animation.Name, animation.StartFrame, animation.EndFrame, playblastLink))
else:
exportFile.write("%s,%d,%d,n/a\n" % (animation.Name, animation.StartFrame, animation.EndFrame))
exportFile.close()
choice = Cmds.confirmDialog(title='Export Complete!', message='Do you want to open the file %s now?' % (exportFilename), button=['Yes','No'], defaultButton='Yes', cancelButton='No', dismissString='No')
if choice == 'Yes':
os.startfile(exportFilename)
def GeneratePlayblast(self, extraArg=None):
prefixText = Cmds.textField(self.prefixTextBox, q=True, text=True)
if not IsNoneOrEmpty(prefixText):
prefixText = "%s_" % prefixText
cameraName = 'persp'
blackThick = 1
horizontalResolution = 320
verticalResolution = 240
scalePercentage = 100
if not Cmds.objExists(cameraName):
self.MessageBox('Playblast generation requires a camera named %s.' % (cameraName), 'Playblast pre-requisite error')
return
self.PlayblastDisplayEnable(False)
directoryName = os.path.dirname(Cmds.file(q=True, sn=True))
if not directoryName:
self.MessageBox('Please save Maya file before blasting!')
return
selectedCount = self.CountSelected()
if selectedCount == 0:
self.MessageBox('Please select animations to blast!')
return
self.StartProgressBar('Generating Playblast', selectedCount)
for animation in self.sequencer.Animations.values():
if self.IsProgressBarCanceled():
self.EndProgressBar()
self.MessageBox('Playblast generation canceled by the user')
return
if animation.Selected == True:
self.SetPlaybackRange(animation.StartFrame, animation.EndFrame)
movieFilename = self.GetPlayblastMovieFilename(directoryName, prefixText, animation)
Cmds.playblast(format='movie', filename=movieFilename, clearCache=True, viewer=False, showOrnaments=True, fp=4, percent=scalePercentage, compression='none', widthHeight=(horizontalResolution, verticalResolution), fo=True)
self.StepProgressBar(1)
self.EndProgressBar()
self.MessageBox('Playblast generation complete!')
def GetPlayblastMovieFilename(self, directoryName, prefixText, animation):
assert(directoryName)
assert(animation)
return "%s/%s%s" % (directoryName, prefixText, animation.Name.replace("\"", "_").replace("*", "_"))
def BakeKeys(self, extraArg=None):
selection = Cmds.ls(selection=True)
if not len(selection) == 1 or not Cmds.nodeType(selection) == 'joint':
self.MessageBox('Please select (only) the topmost joint of the skeletal system', 'Bake keys pre-requisite error')
return
if self.sequencer.EndFrame() > self.sequencer.StartFrame():
Bake(self.sequencer.StartFrame(), self.sequencer.EndFrame())
self.MessageBox('Bake complete!')
def DeleteRigControls(self, extraArg=None):
selection = Cmds.ls(selection=True)
if not len(selection) == 1 or not Cmds.nodeType(selection) == 'joint':
self.MessageBox('Please select (only) the topmost joint of the skeletal system', 'Delete rig controls pre-requisite error')
return
controlGroupName = selection[0].replace('Joint_Root', 'Control')
Cmds.delete(controlGroupName)
skeletonFKGroupName = selection[0].replace('Joint_Root', 'Skeleton_FK')
Cmds.delete(skeletonFKGroupName)
self.MessageBox('Rig controls deleted!')
def TrimKeys(self, extraArg=None):
selection = Cmds.ls(selection=True)
if not len(selection) == 1 or not Cmds.nodeType(selection) == 'joint':
self.MessageBox('Please select (only) the topmost joint of the skeletal system', 'Trim keys pre-requisite error')
return
trimStart = int(Cmds.textField(self.startFrameTextBox, q=True, text=True))
trimEnd = int(Cmds.textField(self.endFrameTextBox, q=True, text=True))
if trimStart < 0:
self.MessageBox('Trim can start only from 0. Please ensure start frame is valid.', 'Trim keys pre-requisite error')
trimRegions = [0] * (trimEnd + 1)
for animation in self.sequencer.Animations.values():
trimRegions[animation.StartFrame:animation.EndFrame + 1] = [1] * (animation.EndFrame - animation.StartFrame + 1)
i = 0
while i < len(trimRegions):
tStart = FindIndexOf(trimRegions, 0, i, trimEnd)
tEnd = FindIndexOf(trimRegions, 1, tStart, trimEnd) - 1
if tEnd < tStart:
break
Cmds.cutKey(selection, animation='keysOrObjects', option='keys', clear=True, hierarchy='below', time=(tStart,tEnd))
i = tEnd + 1
i = i + 1
self.MessageBox('Trim complete!')
def GenerateFbx(self, extraArg=None):
prefixText = Cmds.textField(self.prefixTextBox, q=True, text=True)
if not IsNoneOrEmpty(prefixText):
prefixText = "%s_" % prefixText
mayaFile = Cmds.file(q=True, sn=True)
directoryName = os.path.dirname(mayaFile)
fileName = os.path.splitext(os.path.basename(mayaFile))[0]
generatedAnimationFiles = []
createFiles = True
for animation in self.sequencer.Animations.values():
if animation.Selected == True:
negativeStartFrame = -animation.StartFrame
if createFiles == True:
Cmds.select(all=True)
Cmds.cutKey(time=(self.sequencer.StartFrame() - 10, animation.StartFrame))
Cmds.cutKey(time=(animation.EndFrame, self.sequencer.EndFrame() + 10))
Cmds.keyframe(edit=True, relative=True, timeChange=negativeStartFrame, time=(animation.StartFrame, animation.EndFrame))
fbxFilename = "%s/%s%s.fbx" % (directoryName, prefixText, animation.Name)
generatedAnimationFiles.append([animation, fbxFilename])
if createFiles == True:
melCode = 'FBXExport -f "%s"' % fbxFilename
Mel.eval(melCode)
Cmds.undo()
Cmds.undo()
Cmds.undo()
Cmds.undo()
# Make sure at least some files were selected
if len(generatedAnimationFiles) == 0:
self.MessageBox('Please select at least one animation to export')
return
# Ready the progress bar
#self.StartProgressBar('Generating FBX', len(generatedAnimationFiles))
# Open each of these files and stitch them
masterFilename = "%s/%s%s.fbx" % (directoryName, prefixText, fileName)
masterFile = open(masterFilename, "w")
for animationFileIndex in range(len(generatedAnimationFiles)):
'''
if self.IsProgressBarCanceled():
self.EndProgressBar()
self.MessageBox('FBX generation canceled by user!')
return
'''
generatedAnimationFile = generatedAnimationFiles[animationFileIndex]
generatedAnimation = generatedAnimationFile[0]
generatedFilename = generatedAnimationFile[1]
generatedFile = open(generatedFilename, "r")
preTake = True
inTake = False
postTake = False
takeString = "Take: \"Take 001\" {"
endTakeString = "}"
takeLevel = " "
for currentLine in generatedFile:
strippedLine = currentLine.strip(" \r\n")
if not IsNoneOrEmpty(strippedLine):
splitLine = currentLine.split(strippedLine)
else:
splitLine = [''] * 2
lineStartsWith = splitLine[0]
lineEndsWith = splitLine[1]
if strippedLine == takeString:
inTake = True
preTake = False
takeLevel = lineStartsWith
currentLine = currentLine.replace("Take 001", generatedAnimation.Name)
# For the first file, pick from top of file until the take also
if animationFileIndex == 0 and preTake == True:
masterFile.write(currentLine)
# For all other files, just cut the take and paste it in
if inTake == True:
masterFile.write(currentLine)
# For the last file, pick after the take to the bottom of file
if animationFileIndex == (len(generatedAnimationFiles) - 1) and postTake == True:
masterFile.write(currentLine)
if inTake == True and strippedLine == endTakeString and lineStartsWith == takeLevel:
inTake = False
postTake = True
#self.StepProgressBar(1)
masterFile.close()
self.MessageBox('FBX generation complete!')
def Create(self):
'''
Creates the window
'''
if Cmds.window(self.windowName, exists=True):
Cmds.deleteUI(self.windowName)
# Main window
Cmds.window(self.windowName, title=self.windowTitle, widthHeight=[self.width, self.height])
Cmds.scrollLayout(hst = 16, vst = 16)
Cmds.columnLayout(self.windowLayout)
self.CreateSeparator()
# Buttons
Cmds.rowLayout(self.buttonsLayout, numberOfColumns = 6, columnWidth6=[30, 48, 55, 75, 75, 75], columnAlign6=['left', 'left', 'left', 'left', 'left', 'left'])
Cmds.button(label='Add', backgroundColor=[0.6, 0.9, 0.6], c=Partial(self.Add))
Cmds.button(label='Delete', backgroundColor=[0.9, 0.6, 0.6], c=Partial(self.Delete))
Cmds.button(label='Move Up', c=Partial(self.MoveUp))
Cmds.button(label='Move Down', c=Partial(self.MoveDown))
Cmds.button(label='Refresh', backgroundColor=[0.6, 0.6, 0.9], c=Partial(self.Refresh))
self.CreateSeparator()
# Tool controls
Cmds.frameLayout(label="Tool Controls", collapsable=True, collapse=False)
Cmds.columnLayout(width = self.width - 5)
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[45, 290], columnAlign2=['left', 'left'])
Cmds.text(label=' Prefix')
Cmds.textField(self.prefixTextBox, width = 288)
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[200, 48], columnAlign2=['left', 'left'])
Cmds.text(label=' To import from MoveLister')
Cmds.button(label='Import ML', c=Partial(self.ImportMoveLister), backgroundColor=[0.9, 0.9, 0.8])
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[200, 48], columnAlign2=['left', 'left'])
Cmds.text(label=' To generate multiple playblasts')
Cmds.button(label='PlayBlast', c=Partial(self.GeneratePlayblast), backgroundColor=[0.9, 0.9, 0.8])
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 3, columnWidth3=[200, 90, 48], columnAlign3=['left', 'left', 'left'])
Cmds.text(label=' To export animation list as CSV')
Cmds.button(label='Export to CSV', c=Partial(self.Export), backgroundColor=[0.9, 0.9, 0.8])
self.IncludePlayblastLinkCheckBox = Cmds.checkBox(label='playblast link')
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[200, 48], columnAlign2=['left', 'left'])
Cmds.text(label=' To generate animation-aware FBX')
Cmds.button(label='Generate FBX', c=Partial(self.GenerateFbx), backgroundColor=[0.9, 0.9, 0.8])
self.CreateSeparator()
# Skyrigger controls
Cmds.frameLayout(label="Skyrigger and Animation Controls", collapsable=True, collapse=False)
Cmds.columnLayout(width = self.width - 5)
Cmds.rowLayout(numberOfColumns = 4, columnWidth4=[60, 55, 55, 55], columnAlign4=['left', 'left', 'left', 'left'])
Cmds.text(label=' Start frame')
Cmds.textField(self.startFrameTextBox)
Cmds.text(label='End frame')
Cmds.textField(self.endFrameTextBox)
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[200, 48], columnAlign2=['left', 'left'])
Cmds.text(label=' To bake the keys')
Cmds.button(label='Bake Keys', c=Partial(self.BakeKeys), backgroundColor=[0.8, 0.8, 0.9])
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[200, 48], columnAlign2=['left', 'left'])
Cmds.text(label=' To delete the rig controls')
Cmds.button(label='Delete Rig Controls', c=Partial(self.DeleteRigControls), backgroundColor=[0.9, 0.8, 0.8])
Cmds.setParent('..')
Cmds.rowLayout(numberOfColumns = 2, columnWidth2=[200, 48], columnAlign2=['left', 'left'])
Cmds.text(label=' To trim the keys between moves')
Cmds.button(label='Trim Keys', c=Partial(self.TrimKeys), backgroundColor=[0.8, 0.9, 0.8])
self.CreateSeparator('..')
self.Refresh()
def CreateSeparator(self, parent=None, separatorStyle='double'):
'''
Creates a separator
'''
if parent == None:
parent = self.windowLayout
Cmds.setParent(parent)
Cmds.separator(style=separatorStyle, width=1)
def Show(self):
Cmds.showWindow(self.windowName)
def Run():
sequencerUI = SequencerUI()
sequencerUI.Create()
sequencerUI.Show()
|
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:65015")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:65015")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
"""Platform for climate integration."""
from __future__ import annotations
from datetime import timedelta
from typing import Any
from pymelcloud import DEVICE_TYPE_ATA, DEVICE_TYPE_ATW, AtaDevice, AtwDevice
import pymelcloud.ata_device as ata
import pymelcloud.atw_device as atw
from pymelcloud.atw_device import (
PROPERTY_ZONE_1_OPERATION_MODE,
PROPERTY_ZONE_2_OPERATION_MODE,
Zone,
)
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_platform
from . import MelCloudDevice
from .const import (
ATTR_STATUS,
ATTR_VANE_HORIZONTAL,
ATTR_VANE_HORIZONTAL_POSITIONS,
ATTR_VANE_VERTICAL,
ATTR_VANE_VERTICAL_POSITIONS,
CONF_POSITION,
DOMAIN,
SERVICE_SET_VANE_HORIZONTAL,
SERVICE_SET_VANE_VERTICAL,
)
SCAN_INTERVAL = timedelta(seconds=60)
ATA_HVAC_MODE_LOOKUP = {
ata.OPERATION_MODE_HEAT: HVAC_MODE_HEAT,
ata.OPERATION_MODE_DRY: HVAC_MODE_DRY,
ata.OPERATION_MODE_COOL: HVAC_MODE_COOL,
ata.OPERATION_MODE_FAN_ONLY: HVAC_MODE_FAN_ONLY,
ata.OPERATION_MODE_HEAT_COOL: HVAC_MODE_HEAT_COOL,
}
ATA_HVAC_MODE_REVERSE_LOOKUP = {v: k for k, v in ATA_HVAC_MODE_LOOKUP.items()}
ATW_ZONE_HVAC_MODE_LOOKUP = {
atw.ZONE_OPERATION_MODE_HEAT: HVAC_MODE_HEAT,
atw.ZONE_OPERATION_MODE_COOL: HVAC_MODE_COOL,
}
ATW_ZONE_HVAC_MODE_REVERSE_LOOKUP = {v: k for k, v in ATW_ZONE_HVAC_MODE_LOOKUP.items()}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up MelCloud device climate based on config_entry."""
mel_devices = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
AtaDeviceClimate(mel_device, mel_device.device)
for mel_device in mel_devices[DEVICE_TYPE_ATA]
]
+ [
AtwDeviceZoneClimate(mel_device, mel_device.device, zone)
for mel_device in mel_devices[DEVICE_TYPE_ATW]
for zone in mel_device.device.zones
],
True,
)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_VANE_HORIZONTAL,
{vol.Required(CONF_POSITION): cv.string},
"async_set_vane_horizontal",
)
platform.async_register_entity_service(
SERVICE_SET_VANE_VERTICAL,
{vol.Required(CONF_POSITION): cv.string},
"async_set_vane_vertical",
)
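    # Both services dispatch to the corresponding coroutine on the targeted
    # climate entities; the validated CONF_POSITION field is passed through as
    # the `position` argument of async_set_vane_horizontal/_vertical.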
class MelCloudClimate(ClimateEntity):
"""Base climate device."""
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, device: MelCloudDevice) -> None:
"""Initialize the climate."""
self.api = device
self._base_device = self.api.device
async def async_update(self):
"""Update state from MELCloud."""
await self.api.async_update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self.api.device_info
@property
def target_temperature_step(self) -> float | None:
"""Return the supported step of target temperature."""
return self._base_device.temperature_increment
class AtaDeviceClimate(MelCloudClimate):
"""Air-to-Air climate device."""
_attr_supported_features = (
SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_SWING_MODE
)
def __init__(self, device: MelCloudDevice, ata_device: AtaDevice) -> None:
"""Initialize the climate."""
super().__init__(device)
self._device = ata_device
self._attr_name = device.name
self._attr_unique_id = f"{self.api.device.serial}-{self.api.device.mac}"
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return the optional state attributes with device specific additions."""
attr = {}
vane_horizontal = self._device.vane_horizontal
if vane_horizontal:
attr.update(
{
ATTR_VANE_HORIZONTAL: vane_horizontal,
ATTR_VANE_HORIZONTAL_POSITIONS: self._device.vane_horizontal_positions,
}
)
vane_vertical = self._device.vane_vertical
if vane_vertical:
attr.update(
{
ATTR_VANE_VERTICAL: vane_vertical,
ATTR_VANE_VERTICAL_POSITIONS: self._device.vane_vertical_positions,
}
)
return attr
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
mode = self._device.operation_mode
if not self._device.power or mode is None:
return HVAC_MODE_OFF
return ATA_HVAC_MODE_LOOKUP.get(mode)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._device.set({"power": False})
return
operation_mode = ATA_HVAC_MODE_REVERSE_LOOKUP.get(hvac_mode)
if operation_mode is None:
raise ValueError(f"Invalid hvac_mode [{hvac_mode}]")
props = {"operation_mode": operation_mode}
if self.hvac_mode == HVAC_MODE_OFF:
props["power"] = True
await self._device.set(props)
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_OFF] + [
ATA_HVAC_MODE_LOOKUP.get(mode) for mode in self._device.operation_modes
]
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self._device.room_temperature
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self._device.target_temperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self._device.set(
{"target_temperature": kwargs.get("temperature", self.target_temperature)}
)
@property
def fan_mode(self) -> str | None:
"""Return the fan setting."""
return self._device.fan_speed
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
await self._device.set({"fan_speed": fan_mode})
@property
def fan_modes(self) -> list[str] | None:
"""Return the list of available fan modes."""
return self._device.fan_speeds
async def async_set_vane_horizontal(self, position: str) -> None:
"""Set horizontal vane position."""
if position not in self._device.vane_horizontal_positions:
raise ValueError(
f"Invalid horizontal vane position {position}. Valid positions: [{self._device.vane_horizontal_positions}]."
)
await self._device.set({ata.PROPERTY_VANE_HORIZONTAL: position})
async def async_set_vane_vertical(self, position: str) -> None:
"""Set vertical vane position."""
if position not in self._device.vane_vertical_positions:
raise ValueError(
f"Invalid vertical vane position {position}. Valid positions: [{self._device.vane_vertical_positions}]."
)
await self._device.set({ata.PROPERTY_VANE_VERTICAL: position})
@property
def swing_mode(self) -> str | None:
"""Return vertical vane position or mode."""
return self._device.vane_vertical
async def async_set_swing_mode(self, swing_mode) -> None:
"""Set vertical vane position or mode."""
await self.async_set_vane_vertical(swing_mode)
@property
    def swing_modes(self) -> list[str] | None:
"""Return a list of available vertical vane positions and modes."""
return self._device.vane_vertical_positions
async def async_turn_on(self) -> None:
"""Turn the entity on."""
await self._device.set({"power": True})
async def async_turn_off(self) -> None:
"""Turn the entity off."""
await self._device.set({"power": False})
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
min_value = self._device.target_temperature_min
if min_value is not None:
return min_value
return DEFAULT_MIN_TEMP
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
max_value = self._device.target_temperature_max
if max_value is not None:
return max_value
return DEFAULT_MAX_TEMP
class AtwDeviceZoneClimate(MelCloudClimate):
"""Air-to-Water zone climate device."""
_attr_max_temp = 30
_attr_min_temp = 10
_attr_supported_features = SUPPORT_TARGET_TEMPERATURE
def __init__(
self, device: MelCloudDevice, atw_device: AtwDevice, atw_zone: Zone
) -> None:
"""Initialize the climate."""
super().__init__(device)
self._device = atw_device
self._zone = atw_zone
self._attr_name = f"{device.name} {self._zone.name}"
self._attr_unique_id = f"{self.api.device.serial}-{atw_zone.zone_index}"
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the optional state attributes with device specific additions."""
data = {
ATTR_STATUS: ATW_ZONE_HVAC_MODE_LOOKUP.get(
self._zone.status, self._zone.status
)
}
return data
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
mode = self._zone.operation_mode
if not self._device.power or mode is None:
return HVAC_MODE_OFF
return ATW_ZONE_HVAC_MODE_LOOKUP.get(mode, HVAC_MODE_OFF)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._device.set({"power": False})
return
operation_mode = ATW_ZONE_HVAC_MODE_REVERSE_LOOKUP.get(hvac_mode)
if operation_mode is None:
raise ValueError(f"Invalid hvac_mode [{hvac_mode}]")
if self._zone.zone_index == 1:
props = {PROPERTY_ZONE_1_OPERATION_MODE: operation_mode}
else:
props = {PROPERTY_ZONE_2_OPERATION_MODE: operation_mode}
if self.hvac_mode == HVAC_MODE_OFF:
props["power"] = True
await self._device.set(props)
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return [self.hvac_mode]
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self._zone.room_temperature
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self._zone.target_temperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self._zone.set_target_temperature(
kwargs.get("temperature", self.target_temperature)
)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## @package TMS1mmSingle
# Control module for the Topmetal-S 1mm Electrode single-chip test.
#
from __future__ import print_function
import copy
from command import *
import socket
import time
## Manage Topmetal-S 1mm chip's internal register map.
# Allow combining and disassembling individual registers
# to/from long integer for I/O
#
class TMS1mmReg(object):
## @var _defaultRegMap default register values
_defaultRegMap = {
'DAC' : [0x75c3, 0x8444, 0x7bbb, 0x7375, 0x86d4, 0xe4b2], # from DAC1 to DAC6
'PD' : [1, 1, 1, 1], # from PD1 to PD4, 1 means powered down
'K' : [1, 0, 1, 0, 1, 0, 0, 0, 0, 0], # from K1 to K10, 1 means closed (conducting)
'vref' : 0x8,
'vcasp' : 0x8,
'vcasn' : 0x8,
'vbiasp' : 0x8,
'vbiasn' : 0x8
}
    ## @var _regMap register map local to the class
_regMap = {}
def __init__(self):
self._regMap = copy.deepcopy(self._defaultRegMap)
def set_dac(self, i, val):
self._regMap['DAC'][i] = 0xffff & val
def set_power_down(self, i, onoff):
self._regMap['PD'][i] = 0x1 & onoff
def set_k(self, i, onoff):
self._regMap['K'][i] = 0x1 & onoff
def set_vref(self, val):
self._regMap['vref'] = val & 0xf
def set_vcasp(self, val):
self._regMap['vcasp'] = val & 0xf
def set_vcasn(self, val):
self._regMap['vcasn'] = val & 0xf
def set_vbiasp(self, val):
self._regMap['vbiasp'] = val & 0xf
def set_vbiasn(self, val):
self._regMap['vbiasn'] = val & 0xf
## Get long-integer variable
def get_config_vector(self):
ret = ( self._regMap['vbiasn'] << 126 |
self._regMap['vbiasp'] << 122 |
self._regMap['vcasn'] << 118 |
self._regMap['vcasp'] << 114 |
self._regMap['vref'] << 110 )
for i in xrange(len(self._regMap['K'])):
ret |= self._regMap['K'][i] << (len(self._regMap['K']) - i) + 99
for i in xrange(len(self._regMap['PD'])):
ret |= self._regMap['PD'][i] << (len(self._regMap['PD']) - i) + 95
for i in xrange(len(self._regMap['DAC'])):
ret |= self._regMap['DAC'][i] << (len(self._regMap['DAC'])-1 - i)*16
return ret
dac_fit_a = 4.35861E-5
dac_fit_b = 0.0349427
def dac_volt2code(self, v):
c = int((v - self.dac_fit_b) / self.dac_fit_a)
if c < 0: c = 0
if c > 65535: c = 65535
return c
def dac_code2volt(self, c):
v = c * self.dac_fit_a + self.dac_fit_b
return v
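# Illustrative sketch only (not part of the driver): round-trip a DAC voltage
# through the linear fit above and assemble a config vector. The voltage and
# register choices here are arbitrary assumptions for demonstration.
def example_tms1mm_config():
    reg = TMS1mmReg()
    code = reg.dac_volt2code(1.38)       # voltage -> 16-bit DAC code
    volt = reg.dac_code2volt(code)       # inverse fit, approximately 1.38 V
    reg.set_dac(0, code)
    reg.set_power_down(0, 0)             # power up PD1
    return reg.get_config_vector(), volt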
## Command generator for controlling DAC8568
#
class DAC8568(object):
def __init__(self, cmd):
self.cmd = cmd
def DACVolt(self, x):
        return int(x / 2.5 * 65536.0) # voltage (V) -> 16-bit DAC code, 2.5 V full scale
def write_spi(self, val):
ret = "" # 32 bits
ret += self.cmd.write_register(0, (val >> 16) & 0xffff)
ret += self.cmd.send_pulse(2)
ret += self.cmd.write_register(0, val & 0xffff)
ret += self.cmd.send_pulse(2)
return ret
def turn_on_2V5_ref(self):
return self.write_spi(0x08000001)
def set_voltage(self, ch, v):
return self.write_spi((0x03 << 24) | (ch << 20) | (self.DACVolt(v) << 4))
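    # Example (sketch, values assumed): set_voltage(0, 1.25) gives
    # DACVolt(1.25) = 0x8000, so the 32-bit word written over SPI is
    # (0x03 << 24) | (0 << 20) | (0x8000 << 4) = 0x03080000.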
## Shift_register write and read function.
#
# @param[in] s Socket that is already open and connected to the FPGA board.
# @param[in] data_to_send 130-bit value to be sent to the external SR.
# @param[in] clk_div Clock frequency division factor: (/2**clk_div). 6-bit wide.
# @return Value stored in the external SR that is read back; the valid flag is
#         printed for diagnostics but not returned.
def shift_register_rw(s, data_to_send, clk_div):
div_reg = (clk_div & 0x3f) << 130
data_reg = data_to_send & 0x3ffffffffffffffffffffffffffffffff
cmd = Cmd()
val = div_reg | data_reg
cmdstr = ""
for i in xrange(9):
cmdstr += cmd.write_register(i, (val >> i*16) & 0xffff)
cmdstr += cmd.send_pulse(0x01)
# print([hex(ord(w)) for w in cmdstr])
s.sendall(cmdstr)
time.sleep(0.5)
# read back
cmdstr = ""
for i in xrange(9):
cmdstr += cmd.read_status(8-i)
s.sendall(cmdstr)
retw = s.recv(4*9)
# print([hex(ord(w)) for w in retw])
ret_all = 0
for i in xrange(9):
ret_all = ret_all | ( int(ord(retw[i*4+2])) << ((8-i) * 16 + 8) |
int(ord(retw[i*4+3])) << ((8-i) * 16))
ret = ret_all & 0x3ffffffffffffffffffffffffffffffff
valid = (ret_all & (1 << 130)) >> 130
print("Return: 0x%0x, valid: %d" % (ret, valid))
return ret
if __name__ == "__main__":
host = '192.168.2.3'
port = 1024
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((host,port))
cmd = Cmd()
dac8568 = DAC8568(cmd)
s.sendall(dac8568.turn_on_2V5_ref())
s.sendall(dac8568.set_voltage(6, 1.2))
# enable SDM clock
s.sendall(cmd.write_register(9, 0x01))
x2gain = 2
bufferTest = True
sdmTest = True
tms1mmReg = TMS1mmReg()
tms1mmReg.set_power_down(0, 0)
tms1mmReg.set_power_down(3, 0)
if bufferTest:
tms1mmReg.set_k(0, 0) # 0 - K1 is open, disconnect CSA output
tms1mmReg.set_k(1, 1) # 1 - K2 is closed, allow BufferX2_testIN to inject signal
tms1mmReg.set_k(4, 0) # 0 - K5 is open, disconnect SDM loads
tms1mmReg.set_k(6, 1) # 1 - K7 is closed, BufferX2 output to AOUT_BufferX2
if x2gain == 2:
tms1mmReg.set_k(2, 1) # 1 - K3 is closed, K4 is open, setting gain to X2
tms1mmReg.set_k(3, 0)
else:
tms1mmReg.set_k(2, 0)
tms1mmReg.set_k(3, 1)
if sdmTest:
tms1mmReg.set_k(4, 0)
tms1mmReg.set_k(5, 1)
else:
tms1mmReg.set_k(5, 0)
tms1mmReg.set_k(6, 1) # 1 - K7 is closed, BufferX2 output to AOUT_BufferX2
tms1mmReg.set_k(7, 1) # 1 - K8 is closed, connect CSA out to AOUT1_CSA
tms1mmReg.set_dac(0, tms1mmReg.dac_volt2code(1.38)) # VBIASN R45
tms1mmReg.set_dac(1, tms1mmReg.dac_volt2code(1.55)) # VBIASP R47
tms1mmReg.set_dac(2, tms1mmReg.dac_volt2code(1.45)) # VCASN R29
tms1mmReg.set_dac(3, tms1mmReg.dac_volt2code(1.35)) # VCASP R27
# tms1mmReg.set_dac(4, dac_volt2code(1.58)) # VDIS R16, use external DAC
s.sendall(dac8568.set_voltage(4, 1.58))
tms1mmReg.set_dac(5, tms1mmReg.dac_volt2code(2.68)) # VREF R14
data_to_send = tms1mmReg.get_config_vector()
print("Sent: 0x%0x" % (data_to_send))
    div = 7
    shift_register_rw(s, data_to_send, div)
s.close()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.identifier'
db.add_column(u'survey_question', 'identifier',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.identifier'
db.delete_column(u'survey_question', 'identifier')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'rule'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.children': {
'Meta': {'object_name': 'Children'},
'aged_between_0_5_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_12_23_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_13_17_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_24_59_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_5_12_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_6_11_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'children'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_denominator'", 'to': "orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'to': "orm['survey.Question']"})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'number_of_females': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'number_of_males': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'head'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'survey.women': {
'Meta': {'object_name': 'Women'},
'aged_between_15_19_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_20_49_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'women'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['survey']
|
|
# Algorithm for determining chord symbols based on frequency spectrum
from __future__ import division
import math
samplingFrequency = 2000
bufferSize = 1024
referenceFrequency = 130.81278265 # C
numHarmonics = 2
numOctaves = 4
numBinsToSearch = 2
noteFrequencies = []
chromagram = [0.0]*12
noteNames = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
qualities = ["min", "maj", "sus", "", "-", "+"]
print "Reference Frequencies (hz):"
for i in range(0, 12):
freq = referenceFrequency*math.pow(2, i/12)
print noteNames[i] + ": " + str(freq)
noteFrequencies.append(freq)
# take a frequency vector and then the audio values for each of those frequencies
def calculateChromagram(freq, m):
divisorRatio = (samplingFrequency/4.0)/bufferSize
for n in range(0, 12):
chromaSum = 0
for octave in range(1, numOctaves):
noteSum = 0
for harmonic in range(1, numHarmonics):
centerBin = round((noteFrequencies[n]*octave*harmonic)/divisorRatio)
minBin = centerBin - (numBinsToSearch*harmonic)
maxBin = centerBin + (numBinsToSearch*harmonic)
minIndex = min(range(len(freq)), key=lambda i: abs(freq[i]-minBin))
maxIndex = min(range(len(freq)), key=lambda i: abs(freq[i]-maxBin))
maxVal = 0
for k in range(int(minIndex), int(maxIndex)):
if (m[k] > maxVal):
maxVal = m[k]
noteSum += (maxVal / harmonic)
chromaSum += noteSum
chromagram[n] = chromaSum
return chromagram
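# Illustrative sketch only (not part of the algorithm): build a synthetic
# spectrum whose energy sits at the bins of a C major triad (C, E, G) and feed
# it to calculateChromagram. Treating freq[i] as the bin index i is an
# assumption made for this demo, matching how the search above compares freq
# values against bin numbers.
def exampleChromagramCMajor(numBins=2048):
    divisorRatio = (samplingFrequency/4.0)/bufferSize
    freq = range(numBins)
    m = [0.0]*numBins
    for note in (0, 4, 7): # C, E, G
        for octave in range(1, numOctaves):
            binIndex = int(round((noteFrequencies[note]*octave)/divisorRatio))
            if binIndex < numBins:
                m[binIndex] = 1.0
    return calculateChromagram(freq, m)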
class ChordDetector:
ChordQuality = ["Minor", "Major", "Suspended", "Dominant", "Diminished5th", "Augmented5th"]
bias = 1.06
rootNote = 0
quality = ""
intervals = 0
chromagram = [0]*12
    chordProfiles = [[0]*12 for _ in range(108)]
    chord = [0]*108
def __init__(self):
self.makechordProfiles()
def makechordProfiles(self):
        # weights applied to the chord tones when building each profile
        v1 = 1
        v2 = 1
        v3 = 1
        j = 0
        # triad profiles: (third, fifth) intervals in semitones, in the same
        # order as before: major, minor, diminished, augmented, sus2, sus4
        for third, fifth in [(4, 7), (3, 7), (3, 6), (4, 8), (2, 7), (5, 7)]:
            for i in range(0, 12):
                root = i % 12
                ChordDetector.chordProfiles[j][root] = v1
                ChordDetector.chordProfiles[j][(i+third) % 12] = v2
                ChordDetector.chordProfiles[j][(i+fifth) % 12] = v3
                j += 1
        # seventh-chord profiles: (third, fifth, seventh) intervals, in the
        # same order as before: major 7th, minor 7th, dominant 7th
        for third, fifth, seventh in [(4, 7, 11), (3, 7, 10), (4, 7, 10)]:
            for i in range(0, 12):
                root = i % 12
                ChordDetector.chordProfiles[j][root] = v1
                ChordDetector.chordProfiles[j][(i+third) % 12] = v2
                ChordDetector.chordProfiles[j][(i+fifth) % 12] = v3
                ChordDetector.chordProfiles[j][(i+seventh) % 12] = v3
                j += 1
# print ChordDetector.chordProfiles
#=======================================================================
def detectChord(self, chroma):
        for i in range(0, 12):
            ChordDetector.chromagram[i] = chroma[i]
        self.classifyChromagram()
#=======================================================================
def classifyChromagram(self):
        # remove some of the 5th note energy from ChordDetector.chromagram
        for i in range(0, 12):
            fifth = (i+7) % 12
            ChordDetector.chromagram[fifth] -= 0.1*ChordDetector.chromagram[i]
            if ChordDetector.chromagram[fifth] < 0:
                ChordDetector.chromagram[fifth] = 0
        # score every chord profile; a lower score means a better match.
        # (start, stop, bias, N) per chord family, in the same order as the
        # profiles built in makechordProfiles.
        scoring = [
            (0, 12, ChordDetector.bias, 3),    # major
            (12, 24, ChordDetector.bias, 3),   # minor
            (24, 36, ChordDetector.bias, 3),   # diminished 5th
            (36, 48, ChordDetector.bias, 3),   # augmented 5th
            (48, 60, 1, 3),                    # sus2
            (60, 72, 1, 3),                    # sus4
            (72, 84, 1, 4),                    # major 7th
            (84, 96, ChordDetector.bias, 4),   # minor 7th
            (96, 108, ChordDetector.bias, 4),  # dominant 7th
        ]
        for start, stop, biasToUse, N in scoring:
            for j in range(start, stop):
                ChordDetector.chord[j] = self.calculateChordScore(
                    ChordDetector.chromagram, ChordDetector.chordProfiles[j],
                    biasToUse, N)
        chordindex = self.minimumIndex(ChordDetector.chord, 108)
        # map the winning profile index to root note, quality and added interval
        families = [
            ('Major', 0),          # 0-11
            ('Minor', 0),          # 12-23
            ('Diminished5th', 0),  # 24-35
            ('Augmented5th', 0),   # 36-47
            ('Suspended', 2),      # 48-59 (sus2)
            ('Suspended', 4),      # 60-71 (sus4)
            ('Major', 7),          # 72-83 (major 7th)
            ('Minor', 7),          # 84-95 (minor 7th)
            ('Dominant', 7),       # 96-107 (dominant 7th)
        ]
        qualityName, interval = families[chordindex // 12]
        ChordDetector.rootNote = chordindex % 12
        ChordDetector.quality = ChordDetector.ChordQuality.index(qualityName)
        ChordDetector.intervals = interval
#=======================================================================
    def calculateChordScore(self, chroma, chordProfile, biasToUse, N):
        # score = sqrt(sum over non-chord notes of chroma[i]^2) / ((12 - N)*bias);
        # chord tones have profile value 1 and contribute nothing, so a lower
        # score means a closer match.
        total = 0
        for i in range(0, 12):
            total += (1 - chordProfile[i]) * (chroma[i] * chroma[i])
        return math.sqrt(total) / ((12 - N) * biasToUse)
#=======================================================================
    def minimumIndex(self, array, arrayLength):
        minValue = 100000
        minIndex = 0
        for i in range(0, arrayLength):
            if array[i] < minValue:
                minValue = array[i]
                minIndex = i
        return minIndex
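# Illustrative sketch only (not part of the detector): classify a hand-built
# chromagram containing just C, E and G. With this input the lowest score is
# expected on the C major profile, so rootNote comes out as 0 ("C") and
# quality as the index of "Major" in ChordQuality.
def exampleDetectCMajor():
    chroma = [0.0]*12
    chroma[0] = 1.0 # C
    chroma[4] = 1.0 # E
    chroma[7] = 1.0 # G
    detector = ChordDetector()
    detector.detectChord(chroma)
    return noteNames[ChordDetector.rootNote], ChordDetector.ChordQuality[ChordDetector.quality]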
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
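# Illustrative only (not part of TensorFlow): a small NumPy sketch mirroring
# the equations in the `_lstm_block_cell` docstring above, assuming the gate
# order [i, ci, f, o] along the last axis of `w` and `b`.
def _lstm_block_cell_numpy_sketch(x, cs_prev, h_prev, w, b,
                                  wci=0.0, wcf=0.0, wco=0.0,
                                  forget_bias=1.0, cell_clip=3.0):
  """Reference math for one LSTM block cell step, for readability only."""
  import numpy as np  # local import; this sketch is not used by the ops above
  def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
  xh = np.concatenate([x, h_prev], axis=1)
  i, ci, f, o = np.split(np.dot(xh, w) + b, 4, axis=1)
  f = f + forget_bias
  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = np.tanh(ci)
  cs = ci * i + cs_prev * f
  if cell_clip >= 0:
    cs = np.clip(cs, -cell_clip, cell_clip)
  o = sigmoid(cs * wco + o)
  co = np.tanh(cs)
  h = co * o
  return i, cs, f, o, ci, co, h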
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `3`.
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
  cell_size = cell_size4 // 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wco, wcf, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wco, wcf, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad,
b_grad) = gen_lstm_ops.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wco,
wcf,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad,
wcf_grad, b_grad]
class LSTMBlockCell(rnn_cell_impl.RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
clip_cell=True,
use_peephole=False,
reuse=None):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
clip_cell: boolean, whether to apply cell clipping. See
`_lstm_block_cell()` for details.
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMBlockCell instead.
"""
super(LSTMBlockCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._clip_cell = clip_cell
self._names = {
"W": "kernel",
"b": "bias",
"wci": "w_i_diag",
"wco": "w_o_diag",
"wcf": "w_f_diag",
"scope": "lstm_cell"
}
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, x, states_prev, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or self._names["scope"]):
x_shape = x.get_shape().with_rank(2)
if not x_shape[1].value:
raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
if len(states_prev) != 2:
raise ValueError("Expecting states_prev to be a tuple with length 2.")
input_size = x_shape[1].value
w = vs.get_variable(self._names["W"], [input_size + self._num_units,
self._num_units * 4])
b = vs.get_variable(
self._names["b"], [w.get_shape().with_rank(2)[1].value],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
wci = vs.get_variable(self._names["wci"], [self._num_units])
wco = vs.get_variable(self._names["wco"], [self._num_units])
wcf = vs.get_variable(self._names["wcf"], [self._num_units])
else:
wci = wco = wcf = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = states_prev
(_, cs, _, _, _, _, h) = _lstm_block_cell(
x,
cs_prev,
h_prev,
w,
b,
wci=wci,
wco=wco,
wcf=wcf,
forget_bias=self._forget_bias,
cell_clip=None if self._clip_cell else -1,
use_peephole=self._use_peephole)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
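# --- Editorial usage sketch (not part of the original module) ----------------
# A minimal illustration, assuming a TF 1.x environment: LSTMBlockCell follows
# the standard RNNCell calling convention, so it can be driven by
# tf.nn.dynamic_rnn like any other cell.  The helper below is illustrative
# only and is never called from this module.
def _example_lstm_block_cell_usage():
  import tensorflow as tf  # assumed available in the surrounding environment
  # Batch-major inputs: [batch_size, time_len, input_size]; the input size
  # must be statically known because __call__ checks x_shape[1].value.
  inputs = tf.placeholder(tf.float32, [None, 20, 64])
  cell = LSTMBlockCell(num_units=128, use_peephole=False)
  outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  return outputs, final_state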
class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
"""This is a helper class that provides housekeeping for LSTM cells.
  This may be useful for alternative LSTM and similar types of cells.
  Subclasses must implement the `_call_cell` method and the `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len)`.
Defaults to `time_len` for each element.
scope: `VariableScope` for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
with vs.variable_scope(scope or "lstm_block_wrapper"):
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" %
inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError(
"Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(inputs, initial_cell_state,
initial_output, dtype,
sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(
sequence_length, time_len, dtype=dtype), [1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
        # correctly, since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
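# --- Editorial sketch (not part of the original module) ----------------------
# A small NumPy illustration, assuming NumPy is available, of the index
# arithmetic used by _gather_states above: flattening the leading
# [time, batch] axes and gathering at indices * batch_size + range(batch_size)
# selects out[i, :] = data[indices[i], i, :].
def _example_gather_states_arithmetic():
  import numpy as np  # assumed available; used only for this illustration
  time_len, batch_size, num_units = 4, 3, 2
  data = np.arange(time_len * batch_size * num_units).reshape(
      time_len, batch_size, num_units)
  indices = np.array([2, 0, 3])  # per-example time step to gather
  mod_indices = indices * batch_size + np.arange(batch_size)
  out = data.reshape(-1, num_units)[mod_indices]
  assert (out == data[indices, np.arange(batch_size)]).all()
  return out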
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
  This is an extremely efficient LSTM implementation that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting at the beginning of training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Defaults to `3`.
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip
self._use_peephole = use_peephole
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
input_size = inputs_shape[2].value
w = vs.get_variable(
"kernel",
[input_size + self._num_units, self._num_units * 4], dtype=dtype)
b = vs.get_variable(
"bias", [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0),
dtype=dtype)
if self._use_peephole:
wci = vs.get_variable("w_i_diag", [self._num_units], dtype=dtype)
wco = vs.get_variable("w_o_diag", [self._num_units], dtype=dtype)
wcf = vs.get_variable("w_f_diag", [self._num_units], dtype=dtype)
else:
wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
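# --- Editorial usage sketch (not part of the original module) ----------------
# A minimal illustration, assuming a TF 1.x environment: LSTMBlockFusedCell is
# a FusedRNNCell, so it consumes the whole time-major sequence in a single
# call instead of being stepped by dynamic_rnn.  Illustrative only; never
# called from this module.
def _example_lstm_block_fused_cell_usage():
  import tensorflow as tf  # assumed available in the surrounding environment
  # Time-major inputs: [time_len, batch_size, input_size].
  inputs = tf.placeholder(tf.float32, [20, None, 64])
  cell = LSTMBlockFusedCell(num_units=128)
  outputs, (final_c, final_h) = cell(inputs, dtype=tf.float32)
  return outputs, final_c, final_h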
|
|
# (c) 2005 Ian Bicking, Clark C. Evans and contributors
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import time
import random
import os
import tempfile
try:
# Python 3
from email.utils import parsedate_tz, mktime_tz
except ImportError:
# Python 2
from rfc822 import parsedate_tz, mktime_tz
import six
from paste import fileapp
from paste.fileapp import *
from paste.fixture import *
# NOTE(haypo): don't use string.letters because the order of lower and upper
# case letters changes when locale.setlocale() is called for the first time
LETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def test_data():
harness = TestApp(DataApp(b'mycontent'))
res = harness.get("/")
assert 'application/octet-stream' == res.header('content-type')
assert '9' == res.header('content-length')
assert "<Response 200 OK 'mycontent'>" == repr(res)
harness.app.set_content(b"bingles")
assert "<Response 200 OK 'bingles'>" == repr(harness.get("/"))
def test_cache():
def build(*args,**kwargs):
app = DataApp(b"SomeContent")
app.cache_control(*args,**kwargs)
return TestApp(app).get("/")
res = build()
assert 'public' == res.header('cache-control')
assert not res.header('expires',None)
res = build(private=True)
assert 'private' == res.header('cache-control')
assert mktime_tz(parsedate_tz(res.header('expires'))) < time.time()
res = build(no_cache=True)
assert 'no-cache' == res.header('cache-control')
assert mktime_tz(parsedate_tz(res.header('expires'))) < time.time()
res = build(max_age=60,s_maxage=30)
assert 'public, max-age=60, s-maxage=30' == res.header('cache-control')
expires = mktime_tz(parsedate_tz(res.header('expires')))
assert expires > time.time()+58 and expires < time.time()+61
res = build(private=True, max_age=60, no_transform=True, no_store=True)
assert 'private, no-store, no-transform, max-age=60' == \
res.header('cache-control')
expires = mktime_tz(parsedate_tz(res.header('expires')))
assert mktime_tz(parsedate_tz(res.header('expires'))) < time.time()
def test_disposition():
def build(*args,**kwargs):
app = DataApp(b"SomeContent")
app.content_disposition(*args,**kwargs)
return TestApp(app).get("/")
res = build()
assert 'attachment' == res.header('content-disposition')
assert 'application/octet-stream' == res.header('content-type')
res = build(filename="bing.txt")
assert 'attachment; filename="bing.txt"' == \
res.header('content-disposition')
assert 'text/plain' == res.header('content-type')
res = build(inline=True)
assert 'inline' == res.header('content-disposition')
assert 'application/octet-stream' == res.header('content-type')
res = build(inline=True, filename="/some/path/bing.txt")
assert 'inline; filename="bing.txt"' == \
res.header('content-disposition')
assert 'text/plain' == res.header('content-type')
try:
res = build(inline=True,attachment=True)
except AssertionError:
pass
else:
assert False, "should be an exception"
def test_modified():
harness = TestApp(DataApp(b'mycontent'))
res = harness.get("/")
assert "<Response 200 OK 'mycontent'>" == repr(res)
last_modified = res.header('last-modified')
res = harness.get("/",headers={'if-modified-since': last_modified})
assert "<Response 304 Not Modified ''>" == repr(res)
res = harness.get("/",headers={'if-modified-since': last_modified + \
'; length=1506'})
assert "<Response 304 Not Modified ''>" == repr(res)
res = harness.get("/",status=400,
headers={'if-modified-since': 'garbage'})
assert 400 == res.status and b"ill-formed timestamp" in res.body
res = harness.get("/",status=400,
headers={'if-modified-since':
'Thu, 22 Dec 2030 01:01:01 GMT'})
assert 400 == res.status and b"check your system clock" in res.body
def test_file():
tempfile = "test_fileapp.%s.txt" % (random.random())
content = LETTERS * 20
if six.PY3:
content = content.encode('utf8')
with open(tempfile, "wb") as fp:
fp.write(content)
try:
app = fileapp.FileApp(tempfile)
res = TestApp(app).get("/")
assert len(content) == int(res.header('content-length'))
assert 'text/plain' == res.header('content-type')
assert content == res.body
        assert content == app.content # this is cached
lastmod = res.header('last-modified')
print("updating", tempfile)
file = open(tempfile,"a+")
file.write("0123456789")
file.close()
res = TestApp(app).get("/",headers={'Cache-Control': 'max-age=0'})
assert len(content)+10 == int(res.header('content-length'))
assert 'text/plain' == res.header('content-type')
assert content + b"0123456789" == res.body
assert app.content # we are still cached
file = open(tempfile,"a+")
file.write("X" * fileapp.CACHE_SIZE) # exceed the cashe size
file.write("YZ")
file.close()
res = TestApp(app).get("/",headers={'Cache-Control': 'max-age=0'})
newsize = fileapp.CACHE_SIZE + len(content)+12
assert newsize == int(res.header('content-length'))
assert newsize == len(res.body)
assert res.body.startswith(content) and res.body.endswith(b'XYZ')
assert not app.content # we are no longer cached
finally:
os.unlink(tempfile)
def test_dir():
tmpdir = tempfile.mkdtemp()
try:
tmpfile = os.path.join(tmpdir, 'file')
tmpsubdir = os.path.join(tmpdir, 'dir')
fp = open(tmpfile, 'w')
fp.write('abcd')
fp.close()
os.mkdir(tmpsubdir)
try:
app = fileapp.DirectoryApp(tmpdir)
for path in ['/', '', '//', '/..', '/.', '/../..']:
assert TestApp(app).get(path, status=403).status == 403, ValueError(path)
for path in ['/~', '/foo', '/dir', '/dir/']:
assert TestApp(app).get(path, status=404).status == 404, ValueError(path)
assert TestApp(app).get('/file').body == b'abcd'
finally:
os.remove(tmpfile)
os.rmdir(tmpsubdir)
finally:
os.rmdir(tmpdir)
def _excercize_range(build,content):
    # full content request, but using ranges
res = build("bytes=0-%d" % (len(content)-1))
assert res.header('accept-ranges') == 'bytes'
assert res.body == content
assert res.header('content-length') == str(len(content))
res = build("bytes=-%d" % (len(content)-1))
assert res.body == content
assert res.header('content-length') == str(len(content))
res = build("bytes=0-")
assert res.body == content
assert res.header('content-length') == str(len(content))
# partial content requests
res = build("bytes=0-9", status=206)
assert res.body == content[:10]
assert res.header('content-length') == '10'
res = build("bytes=%d-" % (len(content)-1), status=206)
assert res.body == b'Z'
assert res.header('content-length') == '1'
res = build("bytes=%d-%d" % (3,17), status=206)
assert res.body == content[3:18]
assert res.header('content-length') == '15'
def test_range():
content = LETTERS * 5
if six.PY3:
content = content.encode('utf8')
def build(range, status=206):
app = DataApp(content)
return TestApp(app).get("/",headers={'Range': range}, status=status)
_excercize_range(build,content)
build('bytes=0-%d' % (len(content)+1), 416)
def test_file_range():
tempfile = "test_fileapp.%s.txt" % (random.random())
content = LETTERS * (1+(fileapp.CACHE_SIZE // len(LETTERS)))
if six.PY3:
content = content.encode('utf8')
assert len(content) > fileapp.CACHE_SIZE
with open(tempfile, "wb") as fp:
fp.write(content)
try:
def build(range, status=206):
app = fileapp.FileApp(tempfile)
return TestApp(app).get("/",headers={'Range': range},
status=status)
_excercize_range(build,content)
for size in (13,len(LETTERS), len(LETTERS)-1):
fileapp.BLOCK_SIZE = size
_excercize_range(build,content)
finally:
os.unlink(tempfile)
def test_file_cache():
filename = os.path.join(os.path.dirname(__file__),
'urlparser_data', 'secured.txt')
app = TestApp(fileapp.FileApp(filename))
res = app.get('/')
etag = res.header('ETag')
last_mod = res.header('Last-Modified')
res = app.get('/', headers={'If-Modified-Since': last_mod},
status=304)
res = app.get('/', headers={'If-None-Match': etag},
status=304)
res = app.get('/', headers={'If-None-Match': 'asdf'},
status=200)
res = app.get('/', headers={'If-Modified-Since': 'Sat, 1 Jan 2005 12:00:00 GMT'},
status=200)
res = app.get('/', headers={'If-Modified-Since': last_mod + '; length=100'},
status=304)
res = app.get('/', headers={'If-Modified-Since': 'invalid date'},
status=400)
def test_methods():
filename = os.path.join(os.path.dirname(__file__),
'urlparser_data', 'secured.txt')
app = TestApp(fileapp.FileApp(filename))
get_res = app.get('')
res = app.get('', extra_environ={'REQUEST_METHOD': 'HEAD'})
assert res.headers == get_res.headers
assert not res.body
app.post('', status=405) # Method Not Allowed
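# --- Editorial usage sketch (not part of the original test module) -----------
# A hedged illustration of serving a file outside the TestApp harness,
# assuming paste.httpserver is available; the path below is hypothetical.
def _example_serve_file():
    from paste import httpserver
    app = fileapp.FileApp('/tmp/example.txt')  # hypothetical path
    httpserver.serve(app, host='127.0.0.1', port=8080)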
|
|
from __future__ import unicode_literals
import threading
import weakref
from functools import wraps
import six
from psycopg2cffi._impl import consts
from psycopg2cffi._impl import encodings as _enc
from psycopg2cffi._impl import exceptions
from psycopg2cffi._impl.libpq import libpq, ffi
from psycopg2cffi._impl import util
from psycopg2cffi._impl.adapters import bytes_to_ascii, ascii_to_bytes
from psycopg2cffi._impl.cursor import Cursor
from psycopg2cffi._impl.lobject import LargeObject
from psycopg2cffi._impl.notify import Notify
from psycopg2cffi._impl.xid import Xid
# Map between isolation levels names and values and back.
_isolevels = {
'': consts.ISOLATION_LEVEL_AUTOCOMMIT,
'read uncommitted': consts.ISOLATION_LEVEL_READ_UNCOMMITTED,
'read committed': consts.ISOLATION_LEVEL_READ_COMMITTED,
'repeatable read': consts.ISOLATION_LEVEL_REPEATABLE_READ,
'serializable': consts.ISOLATION_LEVEL_SERIALIZABLE,
'default': -1,
}
for k, v in list(_isolevels.items()):
_isolevels[v] = k
del k, v
_green_callback = None
def check_closed(func):
@wraps(func)
def check_closed_(self, *args, **kwargs):
if self.closed:
raise exceptions.InterfaceError('connection already closed')
return func(self, *args, **kwargs)
return check_closed_
def check_notrans(func):
@wraps(func)
def check_notrans_(self, *args, **kwargs):
if self.status != consts.STATUS_READY:
raise exceptions.ProgrammingError('not valid in transaction')
return func(self, *args, **kwargs)
return check_notrans_
def check_tpc(func):
@wraps(func)
def check_tpc_(self, *args, **kwargs):
if self._tpc_xid:
raise exceptions.ProgrammingError(
'%s cannot be used during a two-phase transaction'
% func.__name__)
return func(self, *args, **kwargs)
return check_tpc_
def check_tpc_supported(func):
@wraps(func)
def check_tpc_supported_(self, *args, **kwargs):
if self.server_version < 80100:
raise exceptions.NotSupportedError(
"server version %s: two-phase transactions not supported"
% self.server_version)
return func(self, *args, **kwargs)
return check_tpc_supported_
def check_async(func):
@wraps(func)
def check_async_(self, *args, **kwargs):
if self._async:
raise exceptions.ProgrammingError(
'%s cannot be used in asynchronous mode' % func.__name__)
return func(self, *args, **kwargs)
return check_async_
class Connection(object):
# Various exceptions which should be accessible via the Connection
# class according to dbapi 2.0
Error = exceptions.Error
DatabaseError = exceptions.DatabaseError
IntegrityError = exceptions.IntegrityError
InterfaceError = exceptions.InterfaceError
InternalError = exceptions.InternalError
NotSupportedError = exceptions.NotSupportedError
OperationalError = exceptions.OperationalError
ProgrammingError = exceptions.ProgrammingError
Warning = exceptions.Warning
def __init__(self, dsn, async=False):
self.dsn = dsn
self.status = consts.STATUS_SETUP
self._encoding = None
self._py_enc = None
self._closed = 0
self._cancel = ffi.NULL
self._typecasts = {}
self._tpc_xid = None
self._notifies = []
self._autocommit = False
self._pgconn = None
self._equote = False
self._lock = threading.RLock()
self.notices = []
self.cursor_factory = None
# The number of commits/rollbacks done so far
self._mark = 0
self._async = async
self._async_status = consts.ASYNC_DONE
self._async_cursor = None
self_ref = weakref.ref(self)
self._notice_callback = ffi.callback(
'void(void *, const char *)',
lambda arg, message: self_ref()._process_notice(
arg, bytes_to_ascii(ffi.string(message))))
if not self._async:
self._connect_sync()
else:
self._connect_async()
def _connect_sync(self):
self._pgconn = libpq.PQconnectdb(self.dsn.encode('utf-8'))
if not self._pgconn:
raise exceptions.OperationalError('PQconnectdb() failed')
elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:
raise self._create_exception()
# Register notice processor
libpq.PQsetNoticeProcessor(
self._pgconn, self._notice_callback, ffi.NULL)
self.status = consts.STATUS_READY
self._setup()
def _connect_async(self):
"""Create an async connection.
        The connection is completed by repeated calls to poll():
        first self._poll_connecting() finishes the connection, then
        self._poll_setup_async() does the same job as self._setup().
"""
self._pgconn = libpq.PQconnectStart(ascii_to_bytes(self.dsn))
if not self._pgconn:
raise exceptions.OperationalError('PQconnectStart() failed')
elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:
raise self._create_exception()
libpq.PQsetNoticeProcessor(
self._pgconn, self._notice_callback, ffi.NULL)
def __del__(self):
self._close()
@check_closed
def __enter__(self):
return self
def __exit__(self, type, name, tb):
if type is None:
self.commit()
else:
self.rollback()
def close(self):
return self._close()
@check_closed
@check_async
@check_tpc
def rollback(self):
self._rollback()
@check_closed
@check_async
@check_tpc
def commit(self):
self._commit()
@check_closed
@check_async
def reset(self):
with self._lock:
self._execute_command(
"ABORT; RESET ALL; SET SESSION AUTHORIZATION DEFAULT;")
self.status = consts.STATUS_READY
self._mark += 1
self._autocommit = False
self._tpc_xid = None
def _get_guc(self, name):
"""Return the value of a configuration parameter."""
with self._lock:
query = 'SHOW %s' % name
if _green_callback:
pgres = self._execute_green(query)
else:
pgres = libpq.PQexec(self._pgconn, ascii_to_bytes(query))
if not pgres or libpq.PQresultStatus(pgres) != libpq.PGRES_TUPLES_OK:
raise exceptions.OperationalError("can't fetch %s" % name)
rv = bytes_to_ascii(ffi.string(libpq.PQgetvalue(pgres, 0, 0)))
libpq.PQclear(pgres)
return rv
def _set_guc(self, name, value):
"""Set the value of a configuration parameter."""
if value.lower() != 'default':
value = util.quote_string(self, value)
else:
value = b'default'
self._execute_command(ascii_to_bytes('SET %s TO ' % name) + value)
def _set_guc_onoff(self, name, value):
"""Set the value of a configuration parameter to a boolean.
The string 'default' is accepted too.
"""
if isinstance(value, six.string_types) and \
value.lower() in (b'default', 'default'):
value = 'default'
else:
value = 'on' if value else 'off'
self._set_guc(name, value)
@property
@check_closed
def isolation_level(self):
if self._autocommit:
return consts.ISOLATION_LEVEL_AUTOCOMMIT
else:
name = self._get_guc('default_transaction_isolation')
return _isolevels[name.lower()]
@check_async
def set_isolation_level(self, level):
if level < 0 or level > 4:
raise ValueError('isolation level must be between 0 and 4')
prev = self.isolation_level
if prev == level:
return
self._rollback()
if level == consts.ISOLATION_LEVEL_AUTOCOMMIT:
return self.set_session(autocommit=True)
else:
return self.set_session(isolation_level=level, autocommit=False)
@check_closed
@check_notrans
def set_session(self, isolation_level=None, readonly=None, deferrable=None,
autocommit=None):
if isolation_level is not None:
if isinstance(isolation_level, int):
if isolation_level < 1 or isolation_level > 4:
raise ValueError('isolation level must be between 1 and 4')
isolation_level = _isolevels[isolation_level]
elif isinstance(isolation_level, six.string_types):
if isinstance(isolation_level, six.binary_type):
isolation_level = bytes_to_ascii(isolation_level)
isolation_level = isolation_level.lower()
if not isolation_level or isolation_level not in _isolevels:
raise ValueError("bad value for isolation level: '%s'" %
isolation_level)
else:
raise TypeError("bad isolation level: '%r'" % isolation_level)
if self.server_version < 80000:
if isolation_level == 'read uncommitted':
isolation_level = 'read committed'
elif isolation_level == 'repeatable read':
isolation_level = 'serializable'
self._set_guc("default_transaction_isolation", isolation_level)
if readonly is not None:
self._set_guc_onoff('default_transaction_read_only', readonly)
if deferrable is not None:
self._set_guc_onoff('default_transaction_deferrable', deferrable)
if autocommit is not None:
self._autocommit = bool(autocommit)
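    # --- Editorial usage note (comments only; not part of the original) ------
    # A hedged illustration of typical set_session() calls accepted by the
    # argument handling above:
    #
    #     conn.set_session(isolation_level='serializable', readonly=True)
    #     conn.set_session(isolation_level=consts.ISOLATION_LEVEL_READ_COMMITTED)
    #     conn.set_session(autocommit=True)   # same effect as conn.autocommit = True
    #
    # The method is guarded by @check_notrans, so it must be called while no
    # transaction is in progress.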
@property
def autocommit(self):
return self._autocommit
@autocommit.setter
def autocommit(self, value):
self.set_session(autocommit=value)
@property
def async(self):
return self._async
@check_closed
def get_backend_pid(self):
return libpq.PQbackendPID(self._pgconn)
def get_parameter_status(self, parameter):
p = libpq.PQparameterStatus(self._pgconn, ascii_to_bytes(parameter))
return bytes_to_ascii(ffi.string(p)) if p != ffi.NULL else None
def get_transaction_status(self):
return libpq.PQtransactionStatus(self._pgconn)
def cursor(self, name=None, cursor_factory=None,
withhold=False, scrollable=None):
if cursor_factory is None:
cursor_factory = self.cursor_factory or Cursor
cur = cursor_factory(self, name)
if not isinstance(cur, Cursor):
raise TypeError(
"cursor factory must be subclass of %s" %
'.'.join([Cursor.__module__, Cursor.__name__]))
if withhold:
cur.withhold = withhold
if scrollable is not None:
cur.scrollable = scrollable
if name and self._async:
raise exceptions.ProgrammingError(
"asynchronous connections cannot produce named cursors")
cur._mark = self._mark
return cur
@check_closed
@check_tpc
def cancel(self):
err_length = 256
errbuf = ffi.new('char[]', err_length)
if libpq.PQcancel(self._cancel, errbuf, err_length) == 0:
raise exceptions.OperationalError(ffi.string(errbuf))
def isexecuting(self):
if not self._async:
return False
if self.status != consts.STATUS_READY:
return True
if self._async_cursor is not None:
return True
return False
@property
def encoding(self):
return self._encoding
@check_closed
@check_async
def set_client_encoding(self, encoding):
encoding = _enc.normalize(encoding)
if self.encoding == encoding:
return
pyenc = _enc.encodings[encoding]
self._rollback()
self._set_guc('client_encoding', encoding)
self._encoding = encoding
self._py_enc = pyenc
@property
def notifies(self):
return self._notifies
@property
@check_closed
def protocol_version(self):
return libpq.PQprotocolVersion(self._pgconn)
@property
@check_closed
def server_version(self):
return libpq.PQserverVersion(self._pgconn)
def fileno(self):
return libpq.PQsocket(self._pgconn)
@property
def closed(self):
return self._closed
@check_closed
@check_tpc_supported
def xid(self, format_id, gtrid, bqual):
return Xid(format_id, gtrid, bqual)
@check_closed
@check_async
@check_tpc_supported
def tpc_begin(self, xid):
if not isinstance(xid, Xid):
xid = Xid.from_string(xid)
if self.status != consts.STATUS_READY:
raise exceptions.ProgrammingError(
'tpc_begin must be called outside a transaction')
if self._autocommit:
raise exceptions.ProgrammingError(
"tpc_begin can't be called in autocommit mode")
self._begin_transaction()
self._tpc_xid = xid
@check_closed
@check_async
@check_tpc_supported
def tpc_commit(self, xid=None):
self._finish_tpc('COMMIT PREPARED', self._commit, xid)
@check_closed
@check_async
@check_tpc_supported
def tpc_rollback(self, xid=None):
self._finish_tpc('ROLLBACK PREPARED', self._rollback, xid)
@check_closed
@check_async
def tpc_prepare(self):
if not self._tpc_xid:
raise exceptions.ProgrammingError(
'prepare must be called inside a two-phase transaction')
self._execute_tpc_command('PREPARE TRANSACTION', self._tpc_xid)
self.status = consts.STATUS_PREPARED
@check_closed
@check_async
@check_tpc_supported
def tpc_recover(self):
return Xid.tpc_recover(self)
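    # --- Editorial usage note (comments only; not part of the original) ------
    # A hedged illustration of the DB-API two-phase commit flow implemented by
    # the tpc_* methods above:
    #
    #     xid = conn.xid(42, 'transaction-id', 'branch-qualifier')
    #     conn.tpc_begin(xid)
    #     # ... execute statements on cursors of this connection ...
    #     conn.tpc_prepare()
    #     conn.tpc_commit()          # or conn.tpc_rollback()
    #
    # A transaction prepared by another session can be finished later by
    # passing its xid explicitly, e.g. conn.tpc_commit(xid).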
def lobject(self, oid=0, mode='', new_oid=0, new_file=None,
lobject_factory=LargeObject):
obj = lobject_factory(self, oid, mode, new_oid, new_file)
return obj
def poll(self):
if self.status == consts.STATUS_SETUP:
self.status = consts.STATUS_CONNECTING
return consts.POLL_WRITE
if self.status == consts.STATUS_CONNECTING:
res = self._poll_connecting()
if res == consts.POLL_OK and self._async:
return self._poll_setup_async()
return res
if self.status in (consts.STATUS_READY, consts.STATUS_BEGIN,
consts.STATUS_PREPARED):
res = self._poll_query()
if res == consts.POLL_OK and self._async and self._async_cursor:
# Get the cursor object from the weakref
curs = self._async_cursor()
if curs is None:
util.pq_clear_async(self._pgconn)
raise exceptions.InterfaceError(
"the asynchronous cursor has disappeared")
libpq.PQclear(curs._pgres)
curs._pgres = util.pq_get_last_result(self._pgconn)
try:
curs._pq_fetch()
finally:
self._async_cursor = None
return res
return consts.POLL_ERROR
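    # --- Editorial usage note (comments only; not part of the original) ------
    # A hedged sketch of the wait loop that typically drives poll() on an
    # asynchronous connection, using the standard-library select module:
    #
    #     import select
    #     while True:
    #         state = conn.poll()
    #         if state == consts.POLL_OK:
    #             break
    #         elif state == consts.POLL_READ:
    #             select.select([conn.fileno()], [], [])
    #         elif state == consts.POLL_WRITE:
    #             select.select([], [conn.fileno()], [])
    #         else:
    #             raise exceptions.OperationalError('poll() returned %s' % state)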
def _poll_connecting(self):
"""poll during a connection attempt until the connection has
established.
"""
status_map = {
libpq.PGRES_POLLING_OK: consts.POLL_OK,
libpq.PGRES_POLLING_READING: consts.POLL_READ,
libpq.PGRES_POLLING_WRITING: consts.POLL_WRITE,
libpq.PGRES_POLLING_FAILED: consts.POLL_ERROR,
libpq.PGRES_POLLING_ACTIVE: consts.POLL_ERROR,
}
res = status_map.get(libpq.PQconnectPoll(self._pgconn), None)
if res is None:
return consts.POLL_ERROR
elif res == consts.POLL_ERROR:
raise self._create_exception()
return res
def _poll_query(self):
"""Poll the connection for the send query/retrieve result phase
Advance the async_status (usually going WRITE -> READ -> DONE) but
don't mess with the connection status.
"""
if self._async_status == consts.ASYNC_WRITE:
ret = self._poll_advance_write(libpq.PQflush(self._pgconn))
elif self._async_status == consts.ASYNC_READ:
            # The same check applies whether or not the connection is async.
            ret = self._poll_advance_read(self._is_busy())
elif self._async_status == consts.ASYNC_DONE:
ret = self._poll_advance_read(self._is_busy())
else:
ret = consts.POLL_ERROR
return ret
def _poll_advance_write(self, flush):
"""Advance to the next state after an attempt of flushing output"""
if flush == 0:
self._async_status = consts.ASYNC_READ
return consts.POLL_READ
if flush == 1:
return consts.POLL_WRITE
if flush == -1:
raise self._create_exception()
return consts.POLL_ERROR
def _poll_advance_read(self, busy):
"""Advance to the next state after a call to a _is_busy* method"""
if busy == 0:
self._async_status = consts.ASYNC_DONE
return consts.POLL_OK
if busy == 1:
return consts.POLL_READ
return consts.POLL_ERROR
def _poll_setup_async(self):
"""Advance to the next state during an async connection setup
If the connection is green, this is performed by the regular sync
code so the queries are sent by conn_setup() while in
CONN_STATUS_READY state.
"""
if self.status == consts.STATUS_CONNECTING:
util.pq_set_non_blocking(self._pgconn, 1, True)
self._equote = self._get_equote()
self._get_encoding()
self._have_cancel_key()
self._autocommit = True
# If the current datestyle is not compatible (not ISO) then
# force it to ISO
if not self._iso_compatible_datestyle():
self.status = consts.STATUS_DATESTYLE
if libpq.PQsendQuery(self._pgconn, b"SET DATESTYLE TO 'ISO'"):
self._async_status = consts.ASYNC_WRITE
return consts.POLL_WRITE
else:
raise self._create_exception()
self.status = consts.STATUS_READY
return consts.POLL_OK
if self.status == consts.STATUS_DATESTYLE:
res = self._poll_query()
if res != consts.POLL_OK:
return res
pgres = util.pq_get_last_result(self._pgconn)
if not pgres or \
libpq.PQresultStatus(pgres) != libpq.PGRES_COMMAND_OK:
                raise exceptions.OperationalError("can't set datestyle to ISO")
libpq.PQclear(pgres)
self.status = consts.STATUS_READY
return consts.POLL_OK
return consts.POLL_ERROR
def _setup(self):
self._equote = self._get_equote()
self._get_encoding()
self._have_cancel_key()
with self._lock:
# If the current datestyle is not compatible (not ISO) then
# force it to ISO
if not self._iso_compatible_datestyle():
self.status = consts.STATUS_DATESTYLE
self._set_guc('datestyle', 'ISO')
self._closed = 0
def _have_cancel_key(self):
if self._cancel != ffi.NULL:
tmp, self._cancel = self._cancel, ffi.NULL
libpq.PQfreeCancel(tmp)
self._cancel = libpq.PQgetCancel(self._pgconn)
if self._cancel == ffi.NULL:
raise exceptions.OperationalError("can't get cancellation key")
def _begin_transaction(self):
if self.status == consts.STATUS_READY and not self._autocommit:
self._execute_command('BEGIN')
self.status = consts.STATUS_BEGIN
def _execute_command(self, command):
with self._lock:
if _green_callback:
pgres = self._execute_green(command)
else:
pgres = libpq.PQexec(self._pgconn, ascii_to_bytes(command))
if not pgres:
raise self._create_exception()
try:
pgstatus = libpq.PQresultStatus(pgres)
if pgstatus != libpq.PGRES_COMMAND_OK:
exc = self._create_exception(pgres=pgres)
pgres = None # ownership transferred to exc
raise exc
finally:
if pgres:
libpq.PQclear(pgres)
def _execute_tpc_command(self, command, xid):
cmd = b' '.join([
ascii_to_bytes(command),
util.quote_string(self, str(xid))])
self._execute_command(cmd)
self._mark += 1
def _execute_green(self, query):
"""Execute version for green threads"""
if self._async_cursor:
raise exceptions.ProgrammingError(
"a single async query can be executed on the same connection")
self._async_cursor = True
if not libpq.PQsendQuery(self._pgconn, ascii_to_bytes(query)):
self._async_cursor = None
return
self._async_status = consts.ASYNC_WRITE
try:
_green_callback(self)
except Exception:
self.close()
raise
else:
return util.pq_get_last_result(self._pgconn)
finally:
self._async_cursor = None
self._async_status = consts.ASYNC_DONE
def _finish_tpc(self, command, fallback, xid):
if xid:
# committing/aborting a received transaction.
if self.status != consts.STATUS_READY:
raise exceptions.ProgrammingError(
"tpc_commit/tpc_rollback with a xid "
"must be called outside a transaction")
self._execute_tpc_command(command, xid)
else:
# committing/aborting our own transaction.
if not self._tpc_xid:
raise exceptions.ProgrammingError(
"tpc_commit/tpc_rollback with no parameter "
"must be called in a two-phase transaction")
if self.status == consts.STATUS_BEGIN:
fallback()
elif self.status == consts.STATUS_PREPARED:
self._execute_tpc_command(command, self._tpc_xid)
else:
raise exceptions.InterfaceError(
'unexpected state in tpc_commit/tpc_rollback')
self.status = consts.STATUS_READY
self._tpc_xid = None
def _close(self):
if self._closed == 1:
return
if self._cancel:
libpq.PQfreeCancel(self._cancel)
self._cancel = ffi.NULL
if self._pgconn:
libpq.PQfinish(self._pgconn)
self._pgconn = None
self._closed = 1
def _commit(self):
with self._lock:
if self._autocommit or self.status != consts.STATUS_BEGIN:
return
self._mark += 1
try:
self._execute_command('COMMIT')
finally:
self.status = consts.STATUS_READY
def _rollback(self):
with self._lock:
if self._autocommit or self.status != consts.STATUS_BEGIN:
return
self._mark += 1
try:
self._execute_command('ROLLBACK')
finally:
self.status = consts.STATUS_READY
def _get_encoding(self):
"""Retrieving encoding"""
client_encoding = self.get_parameter_status('client_encoding')
self._encoding = _enc.normalize(client_encoding)
self._py_enc = _enc.encodings[self._encoding]
def _get_equote(self):
ret = libpq.PQparameterStatus(
self._pgconn, b'standard_conforming_strings')
return ret and ffi.string(ret) == b'off' or False
def _is_busy(self):
with self._lock:
if libpq.PQconsumeInput(self._pgconn) == 0:
raise exceptions.OperationalError(
ffi.string(libpq.PQerrorMessage(self._pgconn)))
res = libpq.PQisBusy(self._pgconn)
self._process_notifies()
return res
def _process_notice(self, arg, message):
"""Store the given message in `self.notices`
        Also delete older entries to make sure there are no more than 50
entries in the list.
"""
self.notices.append(message)
length = len(self.notices)
if length > 50:
del self.notices[:length - 50]
def _process_notifies(self):
while True:
pg_notify = libpq.PQnotifies(self._pgconn)
if not pg_notify:
break
notify = Notify(
pg_notify.be_pid,
ffi.string(pg_notify.relname).decode(self._py_enc),
ffi.string(pg_notify.extra).decode(self._py_enc))
self._notifies.append(notify)
libpq.PQfreemem(pg_notify)
def _create_exception(self, pgres=None, msg=None, cursor=None):
"""Return the appropriate exception instance for the current status.
IMPORTANT: the new exception takes ownership of pgres: if pgres is
        passed as parameter, the caller must clear its own pointer (e.g. set
        it to NULL). If there is a pgres in the cursor it is "stolen": the
        cursor will have it set to NULL.
"""
assert pgres is None or cursor is None, \
"cannot specify pgres and cursor together"
if cursor and cursor._pgres:
pgres, cursor._pgres = cursor._pgres, ffi.NULL
exc_type = exceptions.OperationalError
code = pgmsg = None
        # _py_enc may not be initialized yet in case of errors when
# establishing the connection
err_enc = self._py_enc or 'utf-8'
# If no custom message is passed then get the message from postgres.
# If pgres is available then we first try to get the message for the
# last command, and then the error message for the connection
if pgres:
pgmsg = libpq.PQresultErrorMessage(pgres)
pgmsg = ffi.string(pgmsg).decode(err_enc, 'replace') \
if pgmsg else None
# Get the correct exception class based on the error code
code = libpq.PQresultErrorField(pgres, libpq.LIBPQ_DIAG_SQLSTATE)
if code != ffi.NULL:
code = bytes_to_ascii(ffi.string(code))
exc_type = util.get_exception_for_sqlstate(code)
else:
code = None
exc_type = exceptions.DatabaseError
if not pgmsg:
pgmsg = libpq.PQerrorMessage(self._pgconn)
pgmsg = ffi.string(pgmsg).decode(err_enc, 'replace') \
if pgmsg else None
if msg is None and pgmsg:
msg = pgmsg
for prefix in ("ERROR: ", "FATAL: ", "PANIC: "):
if msg.startswith(prefix):
msg = msg[len(prefix):]
break
# Clear the connection if the status is CONNECTION_BAD (fatal error)
if self._pgconn and libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:
self._closed = 2
exc = exc_type(msg)
exc.pgcode = code
exc.pgerror = pgmsg
exc.cursor = cursor
exc._pgres = pgres
return exc
def _have_wait_callback(self):
return bool(_green_callback)
def _iso_compatible_datestyle(self):
''' Return whether connection DateStyle is ISO-compatible
'''
datestyle = libpq.PQparameterStatus(self._pgconn, b'DateStyle')
return datestyle != ffi.NULL and \
ffi.string(datestyle).startswith(b'ISO')
def _connect(dsn, connection_factory=None, async=False):
if connection_factory is None:
connection_factory = Connection
# Mimic the construction method as used by psycopg2, which notes:
# Here we are breaking the connection.__init__ interface defined
# by psycopg2. So, if not requiring an async conn, avoid passing
# the async parameter.
if async:
return connection_factory(dsn, async=True)
else:
return connection_factory(dsn)
|
|
import glob
import os.path
import re
import subprocess
import sys
import time
base_dir = os.path.dirname(__file__)
maxas_dir = os.path.join(base_dir, "maxas")
sass_dir = os.path.join(base_dir, "sass")
# Tile sizes: m, n, k, vA, vB, vC, div, op, (dynamic shared options)
k128x128x8 = (128, 128, 8, 4, 4, 1, 2, 0, (0,))
k32x32x32 = ( 32, 32, 32, 4, 4, 1, 4, 0, (0, 2**14))
k32x64x32_NN = ( 32, 64, 32, 8, 4, 4, 4, 1, (0, 2**13))
k32x32x64_NT = ( 32, 32, 64, 8, 8, 4, 4, 1, (0,))
k16x64x64_NN = ( 16, 64, 64, 8, 4, 4, 4, 1, (0,))
k16x64x64_NT = ( 16, 64, 64, 8, 8, 4, 4, 1, (0,))
selections = {
"s" : {
"TN" : (k128x128x8, k32x32x32),
"NN" : (k128x128x8, k32x32x32),
"NT" : (k128x128x8, k32x32x32),
"TT" : (k128x128x8, k32x32x32),
},
"h" : {
"TN" : (k128x128x8, k32x32x32),
"NN" : (k128x128x8, k32x32x32, k32x64x32_NN, k16x64x64_NN),
"NT" : (k128x128x8, k32x32x32, k32x32x64_NT, k16x64x64_NT),
"TT" : (k128x128x8, k32x32x32),
},
}
kernels = {
# Generic gemm tiles
"sgemm_128x128x8": {"threads": 256, "sass": "xgemm_128x128x8", "params": "xgemm", "share": "(128*8 + 32)*4 + 4", "args": {"type": "s"} },
"hgemm_128x128x8": {"threads": 256, "sass": "xgemm_128x128x8", "params": "xgemm", "share": "(128*8 + 32)*4 + 4", "args": {"type": "h"} },
"sgemm_32x32x32": {"threads": 128, "sass": "xgemm_32x32x32", "params": "xgemm", "share": "(32*33)*4 + 4", "args": {"type": "s"} },
"hgemm_32x32x32": {"threads": 128, "sass": "xgemm_32x32x32", "params": "xgemm", "share": "(32*33)*4 + 4", "args": {"type": "h"} },
# Custom hgemm tiles designed for small minibatch RNNs
"hgemm_32x64x32_NN": {"threads": 128, "sass": "hgemm_32x64x32_NN", "params": "xgemm", "share": "32*33*2 + 64*32*2 + 4" },
"hgemm_32x32x64_NT": {"threads": 128, "sass": "hgemm_32x32x64_NT", "params": "xgemm", "share": "32*65*4 + 4" },
"hgemm_16x64x64_NN": {"threads": 128, "sass": "hgemm_16x64x64_NN", "params": "xgemm", "share": "(16*64 + 32)*2 + 64*64*2 + 4" },
"hgemm_16x64x64_NT": {"threads": 128, "sass": "hgemm_16x64x64_NT", "params": "xgemm", "share": "(16*64 + 32)*2 + (64*64 + 32)*2 + 4" },
}
_params = {
"xgemm": [
"float* param_C",
"float* param_A",
"float* param_B",
"float param_alpha",
"float param_beta",
"unsigned param_cda",
"unsigned param_cdb",
"unsigned param_cdc",
"unsigned param_m",
"unsigned param_n",
"unsigned param_k",
"unsigned param_blk_a",
"unsigned param_blk_b",
],
}
_space_re = re.compile(r"\s+")
_share_template = r"""
.shared .align 4 .b32 share[{0}];
"""
_kernel_template = r"""
.version {6}
.target {0}
.address_size 64
// args: {5}
.visible .entry {1}(
{2}
)
.reqntid {3}
{{
{4}
ret;
}}
"""
def _get_cache_dir(subdir=None):
cache_dir = 'temp/'
if subdir:
subdir = subdir if isinstance(subdir, list) else [subdir]
cache_dir = os.path.join(cache_dir, *subdir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir
def get_ptx_file(kernel_spec, kernel_name, arch, ptx_ver):
ptx_dir = _get_cache_dir([arch, 'ptx'])
thread_spec = kernel_spec["threads"]
args_spec = str(kernel_spec.get("args",""))
param_spec = _params[kernel_spec["params"]]
kernel_params = []
for p in param_spec:
ptype, pname = _space_re.split(p)
if ptype[-1] == '*':
ptype = '.u64'
elif ptype == 'float':
ptype = '.f32'
else:
ptype = '.u32'
kernel_params.append(" .param %s %s" % (ptype, pname))
kernel_params = ",\n".join(kernel_params)
if "share" in kernel_spec:
share = _share_template.format(eval(kernel_spec["share"]))
else:
share = ""
kernel_text = _kernel_template.format(arch, kernel_name, kernel_params, thread_spec, share, args_spec, ptx_ver)
kernel_ptx = os.path.join(ptx_dir, kernel_name + ".ptx")
current_text = ""
if os.path.exists(kernel_ptx):
f = open(kernel_ptx, "r")
current_text = f.read()
f.close()
# only write out the kernel if text has changed.
if kernel_text != current_text:
f = open(kernel_ptx, "w")
f.write(kernel_text)
f.close()
return kernel_ptx
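# --- Editorial note (comments only; not part of the original script) ---------
# For orientation, the stub rendered by _kernel_template for, say,
# "hgemm_32x32x32" on sm_50 looks roughly like the following (parameter list
# abbreviated; exact whitespace differs):
#
#     .version 4.2
#     .target sm_50
#     .address_size 64
#     // args: {'type': 'h'}
#     .visible .entry hgemm_32x32x32(
#         .param .u64 param_C,
#         .param .u64 param_A,
#         ...
#         .param .u32 param_blk_b
#     )
#     .reqntid 128
#     {
#         .shared .align 4 .b32 share[4228];
#         ret;
#     }
#
# ptxas compiles this stub into a cubin, and maxas then overwrites the
# kernel's code section with the hand-assembled SASS.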
include_re = re.compile(r'^<INCLUDE\s+file="([^"]+)"\s*/>')
def run_command(cmdlist):
cmd = " ".join(cmdlist)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode:
raise RuntimeError("Error(%d):\n%s\n%s" % (proc.returncode, cmd, err))
def get_kernel(base_name, major, minor, options=None):
if major < 5:
raise RuntimeError("sass kernels require Maxwell or greater class hardware")
elif major >= 7:
raise RuntimeError("sm version 7 or greater is not supported")
arch = "sm_%d%d" % (major, minor)
libprefix = "PERL5LIB=%s" % maxas_dir
maxas_i = [libprefix, os.path.join(maxas_dir, "maxas.pl") + " -i -w"]
maxas_p = [libprefix, os.path.join(maxas_dir, "maxas.pl") + " -p"]
kernel_spec = kernels[base_name]
kernel_name = base_name
# static options
if "args" in kernel_spec:
for pair in kernel_spec["args"].items():
maxas_i.append("-D%s %s" % pair)
maxas_p.append("-D%s %s" % pair)
# dynamic options
if options is not None:
for opt in options:
if type(opt) is tuple:
maxas_i.append("-D%s %s" % opt)
maxas_p.append("-D%s %s" % opt)
kernel_name += "_%s%s" % opt
else:
maxas_i.append("-D%s 1" % opt)
maxas_p.append("-D%s 1" % opt)
kernel_name += "_%s" % opt
maxas_i.insert(2, "-k " + kernel_name)
sass_name = kernel_spec["sass"] + ".sass"
cubin_name = kernel_name + ".cubin"
cubin_dir = _get_cache_dir([arch, 'cubin'])
header_dir = os.path.join(base_dir, "include/kernels")
ptx_version = "4.2" if major < 6 else "5.0"
ptx_file = get_ptx_file(kernel_spec, kernel_name, arch, ptx_version)
cubin_file = os.path.join(cubin_dir, cubin_name)
sass_file = os.path.join(sass_dir, sass_name)
header_file = os.path.join(header_dir, kernel_name + "_" + arch + ".h")
if not os.path.exists(sass_file):
raise RuntimeError("Missing: %s for kernel: %s" % (sass_name, kernel_name))
# build the cubin and run maxas in the same command
    # so that a generated cubin is never left unprocessed by maxas (e.g. if the user hits ^C between the two steps)
command_string = [ "ptxas -v -arch", arch, "-o", cubin_file, ptx_file, ";" ] + maxas_i + [sass_file, cubin_file]
run_command(command_string)
cubin_mtime = time.time()
# now also generate the associated header file containing the cubin
with open(cubin_file, 'rb') as input_file:
with open(header_file, 'wb') as output_file:
output_file.write('const uint8_t %s[] = {' % (kernel_name + "_" + arch))
byte = input_file.read(1)
count = 0
while byte:
if count % 12 == 0:
output_file.write('\n ')
output_file.write(' 0x' + byte.encode('hex') + ',')
byte = input_file.read(1)
count += 1
output_file.write('\n};')
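# --- Editorial usage note (comments only; not part of the original script) ---
# A hedged example of a single direct call, mirroring what gen_kernels() does
# in its loops below:
#
#     get_kernel("hgemm_32x32x32", 5, 0, options=("NT", "vec"))
#
# For sm_50 this assembles sass/xgemm_32x32x32.sass, writes
# temp/sm_50/cubin/hgemm_32x32x32_NT_vec.cubin, and emits an embeddable header
# include/kernels/hgemm_32x32x32_NT_vec_sm_50.h of the form
# (byte values illustrative):
#
#     const uint8_t hgemm_32x32x32_NT_vec_sm_50[] = {
#         0x.., 0x.., 0x.., ...
#     };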
def gen_kernels():
for prefix in ['s', 'h']:
for op in ['NN', 'NT', 'TN', 'TT']:
for tileM, tileN, tileK, vecA, vecB, vecC, div, base_op, dyn_shared in selections[prefix][op]:
for vec in [False, True]:
for major, minor in [(5, 0), (6, 0)]:
if base_op:
# The op is part of the base kernel name
base = "%sgemm_%dx%dx%d_%s" % (prefix, tileM, tileN, tileK, op)
opts = ( "vec", ) if vec else ()
else:
# The op is an option passed to a more generic kernel
base = "%sgemm_%dx%dx%d" % (prefix, tileM, tileN, tileK)
opts = ( op, "vec" ) if vec else (op,)
get_kernel(base, major, minor, opts)
def main():
gen_kernels()
if __name__ == "__main__":
main()
|
|
"""
Basic tests for XForms
"""
import os
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User, Group
from django.test.client import Client
from django.core.exceptions import ValidationError
from .models import XForm, XFormField, XFormFieldConstraint, xform_received, lookup_user_by_connection
from eav.models import Attribute
from django.contrib.sites.models import Site
from .app import App
from rapidsms.messages.incoming import IncomingMessage
from rapidsms.models import Connection, Backend
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
class ModelTest(TestCase): #pragma: no cover
def setUp(self):
settings.AUTHENTICATE_XFORMS = False
settings.AUTH_PROFILE_MODEL = None
self.user = User.objects.create_user('fred', 'fred@wilma.com', 'secret')
self.user.save()
self.xform = XForm.on_site.create(name='test', keyword='test', owner=self.user,
site=Site.objects.get_current(),
response='thanks')
def failIfValid(self, constraint, value, type):
try:
constraint.validate(value, type, 'sms')
self.fail("Should have failed validating: %s" % value)
except ValidationError:
pass
def failUnlessValid(self, constraint, value, type):
try:
constraint.validate(value, type, 'sms')
except ValidationError:
self.fail("Should have passed validating: %s" % value)
def failIfClean(self, field, value, type):
try:
field.clean_submission(value, 'sms')
self.fail("Should have failed cleaning: %s" % value)
except ValidationError:
pass
def failUnlessClean(self, field, value, type):
try:
field.clean_submission(value, 'sms')
except ValidationError:
self.fail("Should have passed cleaning: %s" % value)
def testMinValConstraint(self):
msg = 'error message'
c = XFormFieldConstraint(type='min_val', test='10', message=msg)
self.failIfValid(c, '1', XFormField.TYPE_INT)
self.failUnlessValid(c, None, XFormField.TYPE_INT)
self.failUnlessValid(c, '10', XFormField.TYPE_INT)
self.failUnlessValid(c, '11', XFormField.TYPE_INT)
def testMaxValConstraint(self):
msg = 'error message'
c = XFormFieldConstraint(type='max_val', test='10', message=msg)
self.failUnlessValid(c, '1', XFormField.TYPE_INT)
self.failUnlessValid(c, '10', XFormField.TYPE_INT)
self.failUnlessValid(c, None, XFormField.TYPE_INT)
self.failIfValid(c, '11', XFormField.TYPE_INT)
def testMinLenConstraint(self):
msg = 'error message'
c = XFormFieldConstraint(type='min_len', test='2', message=msg)
self.failIfValid(c, 'a', XFormField.TYPE_TEXT)
self.failIfValid(c, '', XFormField.TYPE_TEXT)
self.failUnlessValid(c, None, XFormField.TYPE_TEXT)
self.failUnlessValid(c, 'ab', XFormField.TYPE_TEXT)
self.failUnlessValid(c, 'abcdef', XFormField.TYPE_TEXT)
def testMaxLenConstraint(self):
msg = 'error message'
c = XFormFieldConstraint(type='max_len', test='3', message=msg)
self.failUnlessValid(c, 'a', XFormField.TYPE_TEXT)
self.failUnlessValid(c, '', XFormField.TYPE_TEXT)
self.failUnlessValid(c, None, XFormField.TYPE_TEXT)
self.failUnlessValid(c, 'abc', XFormField.TYPE_TEXT)
self.failIfValid(c, 'abcdef', XFormField.TYPE_TEXT)
def testReqValConstraint(self):
msg = 'error message'
c = XFormFieldConstraint(type='req_val', message=msg)
self.failUnlessValid(c, 'a', XFormField.TYPE_TEXT)
self.failUnlessValid(c, 0, XFormField.TYPE_INT)
self.failUnlessValid(c, '1.20', XFormField.TYPE_FLOAT)
self.failIfValid(c, '', XFormField.TYPE_TEXT)
self.failIfValid(c, None, XFormField.TYPE_TEXT)
def testRegexConstraint(self):
msg = 'error message'
c = XFormFieldConstraint(type='regex', test='^(mal|fev)$', message=msg)
self.failIfValid(c, 'a', XFormField.TYPE_TEXT)
self.failIfValid(c, '', XFormField.TYPE_TEXT)
self.failIfValid(c, 'malo', XFormField.TYPE_TEXT)
self.failUnlessValid(c, None, XFormField.TYPE_TEXT)
self.failUnlessValid(c, 'MAL', XFormField.TYPE_TEXT)
self.failUnlessValid(c, 'FeV', XFormField.TYPE_TEXT)
def testIntField(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_INT, name='number', command='number')
self.failUnlessClean(field, '1 ', XFormField.TYPE_INT)
self.failUnlessClean(field, None, XFormField.TYPE_TEXT)
self.failUnlessClean(field, '', XFormField.TYPE_TEXT)
self.failIfClean(field, 'abc', XFormField.TYPE_TEXT)
self.failIfClean(field, '1.34', XFormField.TYPE_FLOAT)
def testDecField(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_FLOAT, name='number', command='number')
self.failUnlessClean(field, '1', XFormField.TYPE_INT)
self.failUnlessClean(field, ' 1.1', XFormField.TYPE_FLOAT)
self.failUnlessClean(field, None, XFormField.TYPE_TEXT)
self.failUnlessClean(field, '', XFormField.TYPE_TEXT)
self.failIfClean(field, 'abc', XFormField.TYPE_TEXT)
def testStrField(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_TEXT, name='string', command='string')
self.failUnlessClean(field, '1', XFormField.TYPE_INT)
self.failUnlessClean(field, '1.1', XFormField.TYPE_FLOAT)
self.failUnlessClean(field, 'abc', XFormField.TYPE_TEXT)
self.failUnlessClean(field, None, XFormField.TYPE_TEXT)
self.failUnlessClean(field, '', XFormField.TYPE_TEXT)
def testGPSField(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_GEOPOINT, name='location', command='location')
self.failUnlessClean(field, '1 2', XFormField.TYPE_GEOPOINT)
self.failUnlessClean(field, '1.1 1', XFormField.TYPE_GEOPOINT)
self.failUnlessClean(field, '-1.1 -1.123', XFormField.TYPE_GEOPOINT)
self.failUnlessClean(field, '', XFormField.TYPE_GEOPOINT)
self.failUnlessClean(field, None, XFormField.TYPE_GEOPOINT)
self.failIfClean(field, '1.123', XFormField.TYPE_GEOPOINT)
self.failIfClean(field, '1.123 asdf', XFormField.TYPE_GEOPOINT)
self.failIfClean(field, 'asdf', XFormField.TYPE_GEOPOINT)
self.failIfClean(field, '-91.1 -1.123', XFormField.TYPE_GEOPOINT)
self.failIfClean(field, '92.1 -1.123', XFormField.TYPE_GEOPOINT)
self.failIfClean(field, '-1.1 -181.123', XFormField.TYPE_GEOPOINT)
self.failIfClean(field, '2.1 181.123', XFormField.TYPE_GEOPOINT)
def testFieldConstraints(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_TEXT, name='number', command='number')
        # test that with no constraints, all values work
self.failUnlessClean(field, '1', XFormField.TYPE_TEXT)
self.failUnlessClean(field, None, XFormField.TYPE_TEXT)
self.failUnlessClean(field, 'abc', XFormField.TYPE_TEXT)
# now add some constraints
msg1 = 'error message'
field.constraints.create(type='min_val', test='10', message=msg1)
self.failIfClean(field, '1', XFormField.TYPE_TEXT)
self.failIfClean(field, '-1', XFormField.TYPE_TEXT)
self.failUnlessClean(field, '10', XFormField.TYPE_TEXT)
# add another constraint
msg2 = 'error message 2'
field.constraints.create(type='max_val', test='50', message=msg2)
self.failIfClean(field, '1', XFormField.TYPE_TEXT)
self.failUnlessClean(field, '10', XFormField.TYPE_TEXT)
self.failIfClean(field, '100', XFormField.TYPE_TEXT)
# another, but set its order to be first
msg3 = 'error message 3'
field.constraints.create(type='min_val', test='5', message=msg3, order=0)
self.failIfClean(field, '1', XFormField.TYPE_TEXT)
self.failIfClean(field, '6', XFormField.TYPE_TEXT)
class TestProfile(models.Model):
user = models.OneToOneField(User)
connection = models.ForeignKey(Connection)
@classmethod
def lookup_by_connection(cls, connection):
matches = TestProfile.objects.filter(connection=connection)
if matches:
return matches[0]
else:
return None
class SubmissionTest_BROKEN(): #pragma: no cover
def setUp(self):
settings.AUTHENTICATE_XFORMS = False
settings.AUTH_PROFILE_MODEL = None
# bootstrap a form
self.user = User.objects.create_user('fred', 'fred@wilma.com', 'secret')
self.user.save()
self.xform = XForm.on_site.create(name='test', keyword='survey', owner=self.user,
site=Site.objects.get_current(), response='thanks')
self.gender_field = self.xform.fields.create(field_type=XFormField.TYPE_TEXT, name='gender', command='gender', order=1)
self.gender_field.constraints.create(type='req_val', test='None', message="You must include a gender")
self.field = self.xform.fields.create(field_type=XFormField.TYPE_INT, name='age', command='age', order=2)
self.field.constraints.create(type='req_val', test='None', message="You must include an age")
self.name_field = self.xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name', order=4)
def testDataTypes(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_TEXT, name='field', command='field', order=1)
self.failUnlessEqual(field.datatype, 'text')
field.field_type=XFormField.TYPE_INT
field.save()
self.failUnlessEqual(field.datatype, 'int')
def testOrdering(self):
# submit a record, some errors only occur after there is at least one
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey +age 10 +name matt berg +gender male"))
fields = self.xform.fields.all()
self.failUnlessEqual(self.gender_field.pk, fields[0].pk)
self.failUnlessEqual(self.field.pk, fields[1].pk)
self.failUnlessEqual(self.name_field.pk, fields[2].pk)
# move gender to the back
self.gender_field.order = 10
self.gender_field.save()
fields = self.xform.fields.all()
self.failUnlessEqual(self.field.pk, fields[0].pk)
self.failUnlessEqual(self.name_field.pk, fields[1].pk)
self.failUnlessEqual(self.gender_field.pk, fields[2].pk)
def testSlugs(self):
field = self.xform.fields.create(field_type=XFormField.TYPE_TEXT, name='field', command='foo', order=1)
self.failUnlessEqual(field.slug, 'survey_foo')
field.command = 'bar'
field.save()
self.failUnlessEqual(field.slug, 'survey_bar')
# rename our form
self.xform.keyword = 'roger'
self.xform.save()
field = XFormField.on_site.get(pk=field)
self.failUnlessEqual(field.slug, 'roger_bar')
def testSMSSubmission(self):
self.assertEquals('thanks', self.xform.response)
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey +age 10 +name matt berg +gender male"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
# make sure case doesn't matter
submission = self.xform.process_sms_submission(IncomingMessage(None, "Survey +age 10 +name matt berg +gender male"))
self.failUnlessEqual(submission.has_errors, False)
# make sure it works with space in front of keyword
submission = self.xform.process_sms_submission(IncomingMessage(None, " survey male 10 +name matt berg"))
self.failUnlessEqual(submission.has_errors, False)
# test with just an age and gender
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 2)
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
# mix of required and not
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10 +name matt berg"))
self.failUnlessEqual('thanks', submission.response)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
# make sure optional works as well
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10 matt"))
self.failUnlessEqual('thanks', submission.response)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
# make sure we record errors if there is a missing age
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey +name luke skywalker"))
self.failUnlessEqual(submission.has_errors, True)
# our response should be an error message
self.failIfEqual('thanks', submission.response)
self.failUnlessEqual(2, len(submission.errors))
# make sure we record errors if there is just the keyword
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey"))
self.failUnlessEqual(submission.has_errors, True)
self.failUnlessEqual(2, len(submission.errors))
def testSingleFieldSpecialCase(self):
special_xform = XForm.on_site.create(name='test special', keyword='reg', owner=self.user, separator=',',
site=Site.objects.get_current(), response='thanks')
field = special_xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name')
submission = special_xform.process_sms_submission(IncomingMessage(None, "+reg davey crockett"))
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'davey crockett')
def testSignal(self):
# add a listener to our signal
class Listener:
def handle_submission(self, sender, **args):
if args['xform'].keyword == 'survey':
self.submission = args['submission']
self.xform = args['xform']
listener = Listener()
xform_received.connect(listener.handle_submission)
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10 +name matt berg"))
self.failUnlessEqual(listener.submission, submission)
self.failUnlessEqual(listener.xform, self.xform)
# test that it works via update as well
new_vals = { 'age': 20, 'name': 'greg snider' }
self.xform.update_submission_from_dict(submission, new_vals)
self.failUnlessEqual(listener.submission.values.get(attribute__name='age').value, 20)
self.failUnlessEqual(listener.submission.values.get(attribute__name='name').value, 'greg snider')
def testUpdateFromDict(self):
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male +age 10 +name matt berg"))
self.failUnlessEqual(len(submission.values.all()), 3)
# now update the form using a dict
new_vals = { 'age': 20, 'name': 'greg snider' }
self.xform.update_submission_from_dict(submission, new_vals)
self.failUnlessEqual(len(submission.values.all()), 2)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 20)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'greg snider')
# make sure removal case works
new_vals = { 'age': 30 }
self.xform.update_submission_from_dict(submission, new_vals)
self.failUnlessEqual(len(submission.values.all()), 1)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 30)
def testCustomField(self):
# register Users as being an XForm field
def lookup_user(command, username):
return User.objects.get(username=username)
XFormField.register_field_type('user', 'User', lookup_user,
xforms_type='string', db_type=XFormField.TYPE_OBJECT)
# add a user field to our xform
field = self.xform.fields.create(field_type='user', name='user', command='user', order=3)
field.constraints.create(type='req_val', test='None', message="You must include a user")
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10 fred"))
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='user').value, self.user)
def testConfirmationId(self):
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
self.assertEquals(1, submission.confirmation_id)
# and another
submission2 = self.xform.process_sms_submission(IncomingMessage(None, "survey male 12"))
self.assertEquals(2, submission2.confirmation_id)
self.xform2 = XForm.on_site.create(name='test2', keyword='test2', owner=self.user,
site=Site.objects.get_current())
submission3 = self.xform2.process_sms_submission(IncomingMessage(None, "test2"))
self.assertEquals(1, submission3.confirmation_id)
submission4 = self.xform.process_sms_submission(IncomingMessage(None, "survey male 21"))
self.assertEquals(3, submission4.confirmation_id)
# test that resaving the submission doesn't increment our id
submission5 = self.xform.process_sms_submission(IncomingMessage(None, "survey male 22"))
self.assertEquals(4, submission5.confirmation_id)
submission5.raw = "foo"
submission5.save()
self.assertEquals(4, submission5.confirmation_id)
submission6 = self.xform.process_sms_submission(IncomingMessage(None, "survey male 23"))
self.assertEquals(5, submission6.confirmation_id)
def testTemplateResponse(self):
# codify the confirmation id
self.xform.response = 'Your confirmation id is: {{ confirmation_id|codify:"SA" }}'
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
# should be safe to use a static value since we are the first test
self.failUnlessEqual(submission.response, "Your confirmation id is: SA0001")
# no prefix
self.xform.response = 'Your confirmation id is: {{ confirmation_id|codify }}'
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
# should be safe to use a static value since we are the first test
self.failUnlessEqual(submission.response, "Your confirmation id is: 0002")
# now test no template
self.xform.response = "Thanks for sending your message"
self.xform.save()
# assert the message response is right
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
self.failUnlessEqual(submission.response, self.xform.response)
# now change the xform to return the age and gender
self.xform.response = "You recorded an age of {{ age }} and a gender of {{ gender }}. Your confirmation id is {{ confirmation_id }}."
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
self.failUnlessEqual(submission.response, "You recorded an age of 10 and a gender of male. Your confirmation id is 4.")
# if they insert a command that isn't there, it should just be empty
self.xform.response = "You recorded an age of {{ age }} and a gender of {{ gender }}. {{ not_there }} Thanks."
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
self.failUnlessEqual(submission.response, "You recorded an age of 10 and a gender of male. Thanks.")
# make sure template arguments work
self.xform.response = "The two values together are: {{ age|add:gender }}."
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10"))
self.failUnlessEqual(submission.response, "The two values together are: 10.")
# assert we don't let forms save with templates that fail
self.xform.response = "You recorded an age of {{ bad template }}"
try:
self.xform.save()
self.fail("Should have failed in save.")
except Exception as e:
# expected exception because the template is bad, let it pass
pass
def testCommandPrefixes(self):
# set the prefix to '-' instead of '+'
self.xform.command_prefix = '-'
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey -age 10 -name matt berg -gender male"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
# test duplicating the prefix or having junk in it
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey -age 10 --name matt berg -+gender male"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
# set the prefix to nothing instead of '+'
self.xform.command_prefix = None
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey age 10 name matt berg gender male"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
# test mix of required and not required
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10 name matt berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
def testSeparators(self):
self.xform.separator = None
self.xform.save()
# this is also testing an edge case of a value being 0
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 0 matt"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 0)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
self.xform.separator = ","
self.xform.save()
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey,male,10,matt berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male, 10, matt berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male,10,matt berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male, , 10,,, matt berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male,10, +name bniz berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'bniz berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male,10 +name bniz berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'bniz berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male,10,, +name bniz berg"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'bniz berg')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
def testCustomKeywordPrefix(self):
self.xform.keyword_prefix = '+'
submission = self.xform.process_sms_submission(IncomingMessage(None, " +survey male 10 matt"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, " + survey male 10 matt"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
submission = self.xform.process_sms_submission(IncomingMessage(None, " ++ survey male 10 matt"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='age').value, 10)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, 'matt')
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, 'male')
def testCustomResponse(self):
# add a listener to our signal to change what our response will be
class Listener:
def handle_submission(self, sender, **args):
if args['xform'].keyword == 'survey':
self.submission = args['submission']
self.xform = args['xform']
# make sure our template variables are set on the submission
template_vars = self.submission.template_vars
# set our response to 'hello world' instead of 'thanks'
self.submission.response = XForm.render_response("hello world {{ age }}", template_vars)
listener = Listener()
xform_received.connect(listener.handle_submission)
submission = self.xform.process_sms_submission(IncomingMessage(None, "survey male 10 +name matt berg"))
self.failUnlessEqual(listener.submission, submission)
self.failUnlessEqual(listener.xform, self.xform)
self.failUnlessEqual("hello world 10", submission.response)
# test that it works via update as well
new_vals = { 'age': 20, 'name': 'greg snider' }
self.xform.update_submission_from_dict(submission, new_vals)
self.failUnlessEqual(listener.submission.values.get(attribute__name='age').value, 20)
self.failUnlessEqual(listener.submission.values.get(attribute__name='name').value, 'greg snider')
def testFindForm(self):
"""
Tests how we find which form a particular message matches.
"""
# test simple case
self.assertEquals(self.xform, XForm.find_form("survey"))
self.assertEquals(None, XForm.find_form("missing"))
# have another form that is similar, to test that we match correctly in exact matches
surve_form = XForm.on_site.create(name='surve', keyword='surve', owner=self.user,
site=Site.objects.get_current(), response='thanks')
self.assertEquals(self.xform, XForm.find_form("survey hello world"))
self.assertFalse(XForm.find_form("foobar hello world"))
# make sure we match existing forms exactly
self.assertEquals(surve_form, XForm.find_form("surve hello world"))
self.assertFalse(XForm.find_form("survy hello world"))
self.assertFalse(XForm.find_form("survye hello world"))
self.assertEquals(self.xform, XForm.find_form("0survey hello world"))
self.assertEquals(self.xform, XForm.find_form(" survey hello world"))
self.assertEquals(self.xform, XForm.find_form(".survey hello world"))
self.assertEquals(self.xform, XForm.find_form("furvey hello world"))
self.assertEquals(self.xform, XForm.find_form("..survey hello world"))
self.assertEquals(self.xform, XForm.find_form(".+survey hello world"))
# quotes
self.assertEquals(self.xform, XForm.find_form("'survey' hello world"))
self.assertEquals(self.xform, XForm.find_form("'survey', hello world"))
self.assertEquals(self.xform, XForm.find_form("survey,hello world"))
# shouldn't pass, edit distance of 2
self.assertEquals(None, XForm.find_form("furvey1 hello world"))
surve_form.delete()
# wrong keyword
self.assertFalse(XForm.find_form("foobar hello world"))
# fuzzy match tests when only one form exists
self.assertEquals(self.xform, XForm.find_form("surve hello world"))
self.assertEquals(self.xform, XForm.find_form("survy hello world"))
self.assertEquals(self.xform, XForm.find_form("survye hello world"))
self.assertEquals(self.xform, XForm.find_form("0survey hello world"))
self.assertEquals(self.xform, XForm.find_form(" survey hello world"))
self.assertEquals(self.xform, XForm.find_form(".survey hello world"))
self.assertEquals(self.xform, XForm.find_form("furvey hello world"))
self.assertEquals(self.xform, XForm.find_form("..survey hello world"))
self.assertEquals(self.xform, XForm.find_form(".+survey hello world"))
self.assertEquals(self.xform, XForm.find_form("+survey hello world"))
self.assertEquals(self.xform, XForm.find_form("-+-survey hello world"))
# shouldn't pass, edit distance of 2
self.assertFalse(XForm.find_form("furvey1 hello world"))
self.assertFalse(XForm.find_form("10 + 20 +survey hello world"))
self.assertFalse(XForm.find_form("my survey hello world"))
# test when we have a keyword prefix
self.xform.keyword_prefix = '+'
self.xform.save()
# no prefix, no match
self.assertFalse(XForm.find_form("survey hello world"))
# wrong keyword
self.assertFalse(XForm.find_form("foobar hello world"))
# fuzzy match tests when only one form exists
self.assertEquals(self.xform, XForm.find_form("+surve hello world"))
self.assertEquals(self.xform, XForm.find_form("+survy hello world"))
self.assertEquals(self.xform, XForm.find_form("+survye hello world"))
self.assertEquals(self.xform, XForm.find_form("+0survey hello world"))
self.assertEquals(self.xform, XForm.find_form("+ survey hello world"))
self.assertEquals(self.xform, XForm.find_form("+.survey hello world"))
self.assertEquals(self.xform, XForm.find_form("+furvey hello world"))
self.assertEquals(self.xform, XForm.find_form("+..survey hello world"))
self.assertEquals(self.xform, XForm.find_form("+.+survey hello world"))
self.assertEquals(self.xform, XForm.find_form(".+survey hello world"))
self.assertEquals(self.xform, XForm.find_form("--+-survey hello world"))
# shouldn't pass, edit distance of 2
self.assertFalse(XForm.find_form("+furvey1 hello world"))
self.assertFalse(XForm.find_form("10 + 20 +survey hello world"))
self.assertFalse(XForm.find_form("my survey hello world"))
def testApp(self):
"""
Tests that our main app.py handles messages correctly. More detailed testing is done at the unit
level; this just makes sure that the main routing works.
"""
xforms_app = App(None)
msg = IncomingMessage(None, "survey male 10 matt")
self.assertTrue(xforms_app.handle(msg))
self.assertEquals(1, len(self.xform.submissions.all()))
msg = IncomingMessage(None, "foo male 10 matt")
self.assertFalse(xforms_app.handle(msg))
self.assertEquals(1, len(self.xform.submissions.all()))
def testEpi(self):
#+epi ma 12, bd 5
xform = XForm.on_site.create(name='epi_test', keyword='epi', owner=self.user, command_prefix=None,
keyword_prefix = '+', separator = ',;:*.\\s"',
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type=XFormField.TYPE_INT, name='ma', command='ma', order=0)
f2 = xform.fields.create(field_type=XFormField.TYPE_INT, name='bd', command='bd', order=1)
f3 = xform.fields.create(field_type=XFormField.TYPE_INT, name='tb', command='tb', order=2)
f4 = xform.fields.create(field_type=XFormField.TYPE_INT, name='yf', command='yf', order=3)
# huge yellow fever outbreak, from a really poorly-trained texter
submission = xform.process_sms_submission(IncomingMessage(None, "+epi MA12, bd 5. tb0;yf314"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 4)
self.failUnlessEqual(submission.values.get(attribute__name='ma').value, 12)
self.failUnlessEqual(submission.values.get(attribute__name='bd').value, 5)
self.failUnlessEqual(submission.values.get(attribute__name='tb').value, 0)
self.failUnlessEqual(submission.values.get(attribute__name='yf').value, 314)
# missing value
submission = xform.process_sms_submission(IncomingMessage(None, "+epi ma"))
self.failUnless(submission.has_errors)
# duplicate values
submission = xform.process_sms_submission(IncomingMessage(None, "+epi ma 12, ma 5"))
self.failUnless(submission.has_errors)
# zero values
submission = xform.process_sms_submission(IncomingMessage(None, "+epi ma 0"))
self.failIf(submission.has_errors)
#+muac davey crockett, m, 6 months, red
xform = XForm.on_site.create(name='muac_test', keyword='muac', owner=self.user, command_prefix=None,
keyword_prefix = '+', separator = ',',
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name', order=0)
f1.constraints.create(type='req_val', test='None', message="You must include a name")
f2 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='gender', command='gender', order=1)
f2.constraints.create(type='req_val', test='None', message="You must include a gender")
f3 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='age', command='age', order=2)
f3.constraints.create(type='req_val', test='None', message="You must include an age")
f4 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='length', command='length', order=3)
f4.constraints.create(type='req_val', test='None', message="You must include a length")
submission = xform.process_sms_submission(IncomingMessage(None, "+muac davey crockett, m, 6 months, red"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 4)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, "davey crockett")
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, "m")
self.failUnlessEqual(submission.values.get(attribute__name='age').value, "6 months")
self.failUnlessEqual(submission.values.get(attribute__name='length').value, "red")
#+death malthe borg, m, 5day
xform = XForm.on_site.create(name='death_test', keyword='death', owner=self.user, command_prefix=None,
keyword_prefix = '+', separator = ',',
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name', order=0)
f2 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='gender', command='gender', order=1)
f3 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='age', command='age', order=2)
submission = xform.process_sms_submission(IncomingMessage(None, "+death malthe borg, m, 5day"))
self.assertEquals(xform, XForm.find_form("+derth malthe borg, m, 5day"))
self.assertEquals(xform, XForm.find_form("+daeth malthe borg, m, 5day"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 3)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, "malthe borg")
self.failUnlessEqual(submission.values.get(attribute__name='gender').value, "m")
self.failUnlessEqual(submission.values.get(attribute__name='age').value, "5day")
def testPullerCustomerField(self):
# Tests creating a field that is based on the connection of the message, not anything in the message
# itself.
def parse_connection(command, value):
# we should be able to find a connection with this identity
matches = Connection.objects.filter(identity=value)
if matches:
return matches[0]
raise ValidationError("%s parameter value of '%s' does not match any connections.")
def pull_connection(command, message):
# this pulls the actual phone number from the message and returns it as a string
# note that in this case we will actually only match if the phone number starts with '072'
identity = message.connection.identity
if identity.startswith('072'):
return identity
else:
return None
XFormField.register_field_type('conn', 'Connection', parse_connection,
xforms_type='string', db_type=XFormField.TYPE_OBJECT, puller=pull_connection)
# create a new form
xform = XForm.on_site.create(name='sales', keyword='sales', owner=self.user,
site=Site.objects.get_current(), response='thanks for submitting your report')
# create a single field for our connection puller
f1 = xform.fields.create(field_type='conn', name='conn', command='conn', order=0)
f1.constraints.create(type='req_val', test='None', message="Missing connection.")
f2 = xform.fields.create(field_type=XFormField.TYPE_INT, name='age', command='age', order=2)
# create some connections to work with
butt = Backend.objects.create(name='foo')
conn1 = Connection.objects.create(identity='0721234567', backend=butt)
conn2 = Connection.objects.create(identity='0781234567', backend=butt)
# check that we parse out the connection correctly
submission = xform.process_sms_submission(IncomingMessage(conn1, "sales 123"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 2)
self.failUnlessEqual(submission.values.get(attribute__name='conn').value, conn1)
self.failUnlessEqual(123, submission.eav.sales_age)
# now try with a connection that shouldn't match
submission = xform.process_sms_submission(IncomingMessage(conn2, "sales"))
self.failUnlessEqual(submission.has_errors, True)
self.failUnlessEqual(len(submission.values.all()), 0)
def testAgeCustomField(self):
# creates a new field type that parses strings into an integer number of days
# ie, given a string like '5days' or '6 months' will return 5 or 180 (days) respectively
import re
# register a time parser
def parse_timespan(command, value):
match = re.match("(\d+)\W*months?", value, re.IGNORECASE)
if match:
return int(match.group(1))*30
match = re.match("(\d+)\W*days?", value, re.IGNORECASE)
if match:
return int(match.group(1))
raise ValidationError("%s parameter value of '%s' is not a valid timespan." % (command, value))
XFormField.register_field_type('timespan', 'Timespan', parse_timespan,
xforms_type='string', db_type=XFormField.TYPE_INT)
# create a new form
xform = XForm.on_site.create(name='time', keyword='time', owner=self.user,
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type='timespan', name='timespan', command='timespan', order=0)
# try five months
submission = xform.process_sms_submission(IncomingMessage(None, "time +timespan 5 months"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 1)
self.failUnlessEqual(submission.values.get(attribute__name='timespan').value, 150)
# try 6 days
submission = xform.process_sms_submission(IncomingMessage(None, "time +timespan 6days"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 1)
self.failUnlessEqual(submission.values.get(attribute__name='timespan').value, 6)
# something invalid
submission = xform.process_sms_submission(IncomingMessage(None, "time +timespan infinity plus one"))
self.failUnlessEqual(submission.has_errors, True)
def testImportSubmissions(self):
# our fake submitter
backend, created = Backend.objects.get_or_create(name='test')
connection, created = Connection.objects.get_or_create(identity='123', backend=backend)
# Our form has fields: gender, age, and name (optional)
# try passing a string for an int field
values = { 'gender': 'male', 'age': 'Should be number', 'name': 'Eugene'}
self.assertRaises(ValidationError, self.xform.process_import_submission, "", connection, values)
self.assertEquals(0, len(self.xform.submissions.all()))
# try sending something that is not a string
values = { 'age': 25, 'gender': 'male', 'name': 'Eugene'}
self.assertRaises(TypeError, self.xform.process_import_submission, "", connection, values)
self.assertEquals(0, len(self.xform.submissions.all()))
# try excluding an optional field
values = { 'age': '25', 'gender': 'male'}
self.xform.process_import_submission("", connection, values)
self.assertEquals(1, len(self.xform.submissions.all()))
# try excluding a required field
values = { 'gender': 'male', 'name': 'Eugene'}
self.assertRaises(ValidationError, self.xform.process_import_submission, "", connection, values)
self.assertEquals(1, len(self.xform.submissions.all()))
# check that constraint is upheld
self.field.constraints.create(type='max_val', test='100', message="Nobody is that old")
self.field.constraints.create(type='min_val', test='0', message="You are negative old")
values = { 'gender': 'male', 'age': '900', 'name': 'Eugene'}
self.assertRaises(ValidationError, self.xform.process_import_submission, "", connection, values)
self.assertEquals(1, len(self.xform.submissions.all()))
values = { 'gender': 'male', 'age': '-1', 'name': 'Eugene'}
self.assertRaises(ValidationError, self.xform.process_import_submission, "", connection, values)
self.assertEquals(1, len(self.xform.submissions.all()))
# try sending extra fields that are not in the form
values = { 'gender': 'male', 'age': 'Should be number', 'name': 'Eugene', 'extra': "This shouldn't be provided"}
self.assertRaises(ValidationError, self.xform.process_import_submission, "", connection, values)
self.assertEquals(1, len(self.xform.submissions.all()))
# finally, a fully valid submission should be accepted
values = { 'gender': 'male', 'age': '99', 'name': 'Eugene'}
self.xform.process_import_submission("", connection, values)
self.assertEquals(2, len(self.xform.submissions.all()))
def testMultimediaOptionalOnSMS(self):
xform = XForm.on_site.create(name='image', keyword='image', owner=self.user, command_prefix='+',
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name', order=0)
f2 = xform.fields.create(field_type=XFormField.TYPE_IMAGE, name='image', command='image', order=1)
f3 = xform.fields.create(field_type=XFormField.TYPE_AUDIO, name='audio', command='audio', order=2)
f4 = xform.fields.create(field_type=XFormField.TYPE_VIDEO, name='video', command='video', order=3)
# make the multimedia fields required, though as this test shows the constraints don't kick in for SMS submissions
f2.constraints.create(type='req_val', test='None', message="You must include an image")
f3.constraints.create(type='req_val', test='None', message="You must include audio")
f4.constraints.create(type='req_val', test='None', message="You must include a video")
submission = xform.process_sms_submission(IncomingMessage(None, "image +name Michael Jackson"))
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 1)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, "Michael Jackson")
def testODKDefinition(self):
xform = XForm.on_site.create(name='multimedia', keyword='multimedia', owner=self.user, command_prefix='+',
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name', order=0)
f2 = xform.fields.create(field_type=XFormField.TYPE_IMAGE, name='image', command='image', order=1)
f3 = xform.fields.create(field_type=XFormField.TYPE_AUDIO, name='audio', command='audio', order=2)
f4 = xform.fields.create(field_type=XFormField.TYPE_VIDEO, name='video', command='video', order=3)
c = Client()
response = c.get("/xforms/odk/get/%d/" % xform.id)
self.assertEquals(200, response.status_code)
from xml.dom.minidom import parseString
xml = parseString(response.content)
body = xml.getElementsByTagName("h:body")[0]
inputs = body.getElementsByTagName("input")
self.assertEquals(1, len(inputs))
self.assertEquals("name", inputs[0].getAttribute("ref"))
uploads = body.getElementsByTagName("upload")
self.assertEquals(3, len(uploads))
self.assertEquals("image", uploads[0].getAttribute("ref"))
self.assertEquals("image/*", uploads[0].getAttribute("mediatype"))
self.assertEquals("audio", uploads[1].getAttribute("ref"))
self.assertEquals("audio/*", uploads[1].getAttribute("mediatype"))
self.assertEquals("video", uploads[2].getAttribute("ref"))
self.assertEquals("video/*", uploads[2].getAttribute("mediatype"))
def testODKSubmission(self):
xform = XForm.on_site.create(name='multimedia', keyword='multimedia', owner=self.user, command_prefix='+',
site=Site.objects.get_current(), response='thanks')
f1 = xform.fields.create(field_type=XFormField.TYPE_TEXT, name='name', command='name', order=0)
f2 = xform.fields.create(field_type=XFormField.TYPE_IMAGE, name='image', command='image', order=1)
f3 = xform.fields.create(field_type=XFormField.TYPE_AUDIO, name='audio', command='audio', order=2)
f4 = xform.fields.create(field_type=XFormField.TYPE_VIDEO, name='video', command='video', order=3)
xml = "<?xml version='1.0' ?><data><image>test__image.jpg</image>"
"<audio>test__audio.jpg</audio><video>test__video.jpg</video><name>Michael Jackson</name></data>"
# build up our dict of xml values and binaries
binaries = dict()
values = dict()
values['name'] = "Michael Jackson"
values['image'] = "test__image.jpg"
values['audio'] = "test__audio.mp3"
values['video'] = "test__video.mp4"
binaries['test__image.jpg'] = "jpgimage"
binaries['test__audio.mp3'] = "mp3file"
binaries['test__video.mp4'] = "vidfile"
# remove those files if they exist
directory = os.path.join(settings.MEDIA_ROOT, 'binary')
for name in ['test__image.jpg', 'test__audio.mp3', 'test__video.mp4']:
try:
os.remove(os.path.join(directory, name))
except:
pass
submission = xform.process_odk_submission(xml, values, binaries)
self.failUnlessEqual(submission.has_errors, False)
self.failUnlessEqual(len(submission.values.all()), 4)
self.failUnlessEqual(submission.values.get(attribute__name='name').value, "Michael Jackson")
binary = submission.values.get(attribute__name='image').value.binary
self.failUnlessEqual("binary/test__image.jpg", binary.name)
self.failUnlessEqual("jpgimage", binary.read())
binary = submission.values.get(attribute__name='audio').value.binary
self.failUnlessEqual("binary/test__audio.mp3", binary.name)
self.failUnlessEqual("mp3file", binary.read())
binary = submission.values.get(attribute__name='video').value.binary
self.failUnlessEqual("binary/test__video.mp4", binary.name)
self.failUnlessEqual("vidfile", binary.read())
def testRestrictMessage(self):
c = Client()
self.group = Group.objects.create(name="Reporters")
c.login(username="fred", password="secret")
# try creating a new xform with no restrict_to
form_values = dict(name="Perm Form", keyword='perm', description="Permission test form",
response="You were able to submit this you special person")
resp = c.post(reverse('xforms_create'), form_values, follow=True)
self.assertEquals(200, resp.status_code)
form = XForm.objects.get(keyword='perm')
self.assertTrue(form)
# reset
form.delete()
# this time we submit with a group to restrict to, but no message
form_values['restrict_to'] = self.group.id
resp = c.post(reverse('xforms_create'), form_values, follow=True)
# should fail
self.assertEquals(200, resp.status_code)
self.assertTrue(resp.context['form'].errors)
# but if we add in a message
form_values['restrict_message'] = "Sorry, you don't have permission to submit this form"
resp = c.post(reverse('xforms_create'), form_values, follow=True)
# should pass
self.assertEquals(200, resp.status_code)
form = XForm.objects.get(keyword='perm')
self.assertTrue(form)
def testODKAuth(self):
c = Client()
response = c.get(reverse('odk_list'))
# we should get a 200 back, there are no restrictions on viewing forms
self.assertEquals(200, response.status_code)
from xml.dom.minidom import parseString
xml = parseString(response.content)
forms = xml.getElementsByTagName("forms")[0].getElementsByTagName("form")
self.assertEquals(1, len(forms))
self.assertEquals('test', forms[0].firstChild.wholeText)
settings.AUTHENTICATE_XFORMS = True
# try again now
response = c.get(reverse('odk_list'))
# we should be asked to authenticate
self.assertEquals(401, response.status_code)
def testODKFiltering(self):
# tests that we only show those forms that we are allowed to view
c = Client()
c.login(username="fred", password="secret")
# we aren't going to force authentication using DIGEST; instead we
# use Django's session authentication for these tests
settings.AUTHENTICATE_XFORMS = False
response = c.get(reverse('odk_list'))
from xml.dom.minidom import parseString
xml = parseString(response.content)
# no restrictions on this form, so should see just one form
forms = xml.getElementsByTagName("forms")[0].getElementsByTagName("form")
self.assertEquals(1, len(forms))
self.assertEquals('test', forms[0].firstChild.wholeText)
# create a group for reporters (fred not part of it)
self.group = Group.objects.create(name="Reporters")
restricted_form = XForm.on_site.create(name='restricted', keyword='restricted', owner=self.user, command_prefix='+',
site=Site.objects.get_current(), response='thanks',
restrict_message="Sorry, you can't access this form")
restricted_form.restrict_to.add(self.group)
# get the list again, should not include this form
response = c.get(reverse('odk_list'))
xml = parseString(response.content)
forms = xml.getElementsByTagName("forms")[0].getElementsByTagName("form")
self.assertEquals(1, len(forms))
self.assertEquals('test', forms[0].firstChild.wholeText)
# have fred join the reporters group
self.user.groups.add(self.group)
# now he should get all the forms
response = c.get(reverse('odk_list'))
xml = parseString(response.content)
forms = xml.getElementsByTagName("forms")[0].getElementsByTagName("form")
self.assertEquals(2, len(forms))
self.assertEquals('test', forms[0].firstChild.wholeText)
self.assertEquals('restricted', forms[1].firstChild.wholeText)
def testODKGetSecurity(self):
c = Client()
form_url = reverse('odk_form', args=[self.xform.id])
# fetching it normally, no problem
response = c.get(form_url)
self.assertEquals(200, response.status_code)
# fetching it when restricted, 401
self.group = Group.objects.create(name="Reporters")
self.user.groups.add(self.group)
self.xform.restrict_to.add(self.group)
response = c.get(form_url)
self.assertEquals(403, response.status_code)
# logged in then ok
c.login(username="fred", password="secret")
response = c.get(form_url)
self.assertEquals(200, response.status_code)
# remove restriction, still ok
self.xform.restrict_to.remove(self.group)
response = c.get(form_url)
self.assertEquals(200, response.status_code)
def testUserLookup(self):
"""
Tests that we can look up a user by a connection if a model with a
connection_set is used as the Django Profile object
"""
butt = Backend.objects.create(name='fanny')
conn1 = Connection.objects.create(identity='0721234567', backend=butt)
conn2 = Connection.objects.create(identity='0781234567', backend=butt)
# first try with no profile set
self.assertEquals(None, lookup_user_by_connection(conn1))
# create a profile class
settings.AUTH_PROFILE_MODEL = 'rapidsms_xforms.tests.TestProfile'
# profile set but no profile objects mapped should still be no connection
self.assertEquals(None, lookup_user_by_connection(conn1))
# but now let's associate our user with our connection
TestProfile.objects.create(user=self.user, connection=conn1)
# now we should get the user for this connection
self.assertEquals(self.user, lookup_user_by_connection(conn1))
# but nothing for our other connection
self.assertEquals(None, lookup_user_by_connection(conn2))
def testSMSSecurity(self):
"""
Tests that forms with restrict_to set only accept submissions from connections linked to users in that group.
"""
# add restrict_to and restrict_message to our xform
self.group = Group.objects.create(name="Reporters")
self.xform.restrict_to.add(self.group)
self.xform.restrict_message = "You don't have permission to submit this form"
self.xform.save()
butt = Backend.objects.create(name='fanny')
conn1 = Connection.objects.create(identity='0721234567', backend=butt)
conn2 = Connection.objects.create(identity='0781234567', backend=butt)
submission = self.xform.process_sms_submission(IncomingMessage(conn1, "survey +age 10 +name matt berg +gender male"))
# should have an error
self.assertEquals(True, submission.has_errors)
self.assertEquals(self.xform.restrict_message, submission.response)
# now add the profile model
settings.AUTH_PROFILE_MODEL = 'rapidsms_xforms.tests.TestProfile'
# add our user to the group
self.user.groups.add(self.group)
TestProfile.objects.create(connection=conn1, user=self.user)
# now we should be able to submit
submission = self.xform.process_sms_submission(IncomingMessage(conn1, "survey +age 10 +name matt berg +gender male"))
self.assertEquals(False, submission.has_errors)
# but conn2 cannot
submission = self.xform.process_sms_submission(IncomingMessage(conn2, "survey +age 10 +name matt berg +gender male"))
self.assertEquals(True, submission.has_errors)
self.assertEquals(self.xform.restrict_message, submission.response)
|
|
'''
Visgraph supports backing the graph objects with a postgres db.
'''
import psycopg2
import traceback
import collections
import visgraph.graphcore as vg_graphcore
init_db = '''
DROP TABLE IF EXISTS vg_edges;
CREATE TABLE vg_edges (
eid BIGSERIAL,
n1 BIGINT,
n2 BIGINT,
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (eid)
);
CREATE INDEX vg_edges_idx_n1 ON vg_edges (n1);
CREATE INDEX vg_edges_idx_n2 ON vg_edges (n2);
DROP TABLE IF EXISTS vg_edge_props;
CREATE TABLE vg_edge_props (
eid BIGINT,
pname VARCHAR(256) NOT NULL,
intval BIGINT,
strval VARCHAR(1024),
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (eid, pname)
);
CREATE INDEX vg_edge_eid_idx ON vg_edge_props (eid);
CREATE INDEX vg_edge_pname_intval ON vg_edge_props (pname, intval);
CREATE INDEX vg_edge_pname_strval ON vg_edge_props (pname, strval);
DROP TABLE IF EXISTS vg_nodes;
CREATE TABLE vg_nodes (
nid BIGSERIAL,
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY(nid)
);
DROP TABLE IF EXISTS vg_node_props;
CREATE TABLE vg_node_props (
nid BIGINT NOT NULL,
pname VARCHAR(255) NOT NULL,
intval BIGINT,
strval VARCHAR(1024),
created TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (nid,pname)
);
CREATE INDEX vg_node_nid_idx ON vg_node_props (nid);
CREATE INDEX vg_node_pname_intval ON vg_node_props (pname, intval);
CREATE INDEX vg_node_pname_strval ON vg_node_props (pname, strval);
'''
# Example database credentials...
default_dbinfo = {
'user':'visgraph',
'password':'ohhai!',
'database':'visgraph',
# Add host if you want...
}
def initGraphDb(dbinfo):
db = psycopg2.connect(**dbinfo)
c = db.cursor()
c.execute(init_db)
c.close()
db.commit()
db.close()
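# A minimal usage sketch (not part of the original source): assuming a
# reachable postgres instance matching default_dbinfo above, the typical
# flow is to build the schema once, then open a store against it:
#
#     initGraphDb(default_dbinfo)             # one-time schema creation
#     store = DbGraphStore(default_dbinfo)    # defined below
#     nid = store.addNode(name='entry')       # properties land in vg_node_props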
# Rollback transactions on exception
def rollsafe(f):
def doroll(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception, e:
traceback.print_exc()
try:
args[0].db.rollback()
except Exception, e:
pass
raise
doroll.__doc__ = f.__doc__
doroll.__name__ = f.__name__
return doroll
class DbGraphStore:
'''
A DbGraphStore object may be used for all the standard management
of node and edge information but may not be used for path queries.
FIXME possibly make it able to do path queries but *really* slow?
Use the buildSubGraph() API to pull path-searchable graphs out of
the DbGraphStore.
'''
def __init__(self, dbinfo=None):
if dbinfo == False:
return
if dbinfo == None:
dbinfo = default_dbinfo
self.dbinfo = dbinfo
self.db = psycopg2.connect(**dbinfo)
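# each _do* helper below commits immediately while autocommit is True;
# set it to False and call _doCommit() to batch work into one transaction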
self.autocommit = True
@rollsafe
def _doSelect(self, query, *args):
'''
For now, a fetchall based select wrapper.
'''
c = self.db.cursor()
c.execute(query, args)
res = c.fetchall()
c.close()
if self.autocommit:
self.db.commit()
return res
@rollsafe
def _doInsert(self, query, *args):
'''
Standard insert wrapper.
'''
c = self.db.cursor()
c.execute(query, args)
c.close()
if self.autocommit:
self.db.commit()
@rollsafe
def _doUpdate(self, query, *args):
'''
Do an update with 'returning' syntax to know
if an update was made.
'''
res = []
c = self.db.cursor()
c.execute(query, args)
res = c.fetchall()
c.close()
if self.autocommit:
self.db.commit()
return res
@rollsafe
def _doInsertRetId(self, query, *args):
'''
Insert with a returning value.
'''
c = self.db.cursor()
c.execute(query, args)
hid = c.fetchall()[0][0]
c.close()
if self.autocommit:
self.db.commit()
return hid
@rollsafe
def _doInsertRetIds(self, query, *args):
'''
Insert with a returning list of IDs.
'''
c = self.db.cursor()
c.execute(query, args)
rows = c.fetchall()
c.close()
if self.autocommit:
self.db.commit()
return [ r[0] for r in rows ]
def _doCommit(self):
self.db.commit()
def addNode(self, nodeid=None, ninfo=None, **kwargs):
if nodeid != None:
raise Exception('DbGraphStore Manages nodeid!')
q = 'INSERT INTO vg_nodes DEFAULT VALUES RETURNING nid'
nid = self._doInsertRetId(q)
if ninfo != None:
kwargs.update(ninfo)
for key,val in kwargs.items():
self.setNodeProp(nid, key, val)
return nid
def delEdge(self, eid):
'''
Delete an edge from the graph database.
Example: g.delEdge(eid)
'''
q = '''
DELETE FROM
vg_edge_props
WHERE
eid = %s
'''
self._doInsert(q, eid)
q = '''
DELETE FROM
vg_edges
WHERE
eid = %s
'''
self._doInsert(q, eid)
def delNode(self, nid):
'''
Delete the given node (and its edges) from the graph database.
Example: g.delNode(nid)
NOTE: this will delete any edges which go to or from nid!
'''
# Delete edge properties to and from
q = '''
DELETE FROM
vg_edge_props
USING
vg_edges
WHERE
vg_edges.n1 = %s
AND
vg_edges.eid = vg_edge_props.eid
'''
self._doInsert(q, nid)
q = '''
DELETE FROM
vg_edge_props
USING
vg_edges
WHERE
vg_edges.n2 = %s
AND
vg_edges.eid = vg_edge_props.eid
'''
self._doInsert(q, nid)
# Delete edges to and from
q = '''
DELETE FROM
vg_edges
WHERE
vg_edges.n1 = %s
'''
self._doInsert(q, nid)
q = '''
DELETE FROM
vg_edges
WHERE
vg_edges.n2 = %s
'''
self._doInsert(q, nid)
# Delete from node properties
q = '''
DELETE FROM
vg_node_props
WHERE
nid = %s
'''
self._doInsert(q, nid)
q = '''
DELETE FROM
vg_nodes
WHERE
nid = %s
'''
self._doInsert(q, nid)
def setNodeProp(self, nid, pname, value):
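# values are stored in either the intval or strval column depending on
# the python type; bools are coerced to ints first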
if isinstance(value, bool):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
q = 'UPDATE vg_node_props SET intval=%s,created=NOW() WHERE nid=%s and pname=%s RETURNING nid'
q1 = 'INSERT INTO vg_node_props (nid, pname, intval) VALUES (%s,%s,%s)'
else:
q = 'UPDATE vg_node_props SET strval=%s,created=NOW() WHERE nid=%s and pname=%s RETURNING nid'
q1 = 'INSERT INTO vg_node_props (nid, pname, strval) VALUES (%s,%s,%s)'
# return a value to see if we actually did the update...
res = self._doSelect(q, value, nid, pname)
if len(res) == 0:
self._doInsert(q1, nid, pname, value)
if self.autocommit:
self.db.commit()
def getNodeProp(self, nid, pname, default=None):
q = 'SELECT intval,strval from vg_node_props WHERE nid=%s AND pname=%s'
res = self._doSelect(q, nid, pname)
if len(res) == 0:
return default
intval, strval = res[0]
if intval != None:
return intval
return strval
def delNodeProp(self, nid, pname):
q = 'DELETE FROM vg_node_props WHERE nid=%s AND pname=%s'
self._doInsert(q, nid, pname)
def getNodeProps(self, nid):
ret = {}
q = 'SELECT pname,intval,strval FROM vg_node_props WHERE nid=%s'
for pname,intval,strval in self._doSelect(q, nid):
if intval != None:
ret[pname] = intval
else:
ret[pname] = strval
return ret
def getNodesProps(self, nids):
ret = collections.defaultdict(dict)
q = 'SELECT nid,pname,intval,strval FROM vg_node_props WHERE nid IN %s'
for nid,pname,intval,strval in self._doSelect(q, tuple(nids)):
if intval != None:
ret[nid][pname] = intval
else:
ret[nid][pname] = strval
return ret.items()
def addEdge(self, fromid, toid, eid=None, einfo=None):
if eid != None:
raise Exception('DbGraphStore Manages eid!')
if fromid == None:
raise Exception('Invalid from id (None)!')
if toid == None:
raise Exception('Invalid to id (None)!')
q = 'INSERT INTO vg_edges (n1, n2) VALUES (%s, %s) RETURNING eid'
eid = self._doInsertRetId(q, fromid, toid)
if einfo != None:
for key,val in einfo.items():
self.setEdgeProp(eid, key, val)
return eid
def getRefsFrom(self, nodeid):
'''
Return a list of edges which originate with us.
Example: for eid, fromid, toid, einfo in g.getRefsFrom(id)
'''
q = '''
SELECT
vg_edges.*,
vg_edge_props.*
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n1 = %s AND
vg_edges.eid = vg_edge_props.eid
'''
refs = {}
res = self._doSelect(q, nodeid)
for eid,n1,n2,created,eid1,pname,intval,strval,created1 in res:
r = refs.get(eid)
if r == None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval != None:
r[3][pname] = intval
else:
r[3][pname] = strval
return refs.values()
def getRefsTo(self, nodeid):
'''
Return a list of edges which reference us (edges whose destination is this node).
Example: for eid, fromid, toid, einfo in g.getRefsTo(id)
'''
q = '''
SELECT
vg_edges.*,
vg_edge_props.*
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n2 = %s AND
vg_edges.eid = vg_edge_props.eid
'''
refs = {}
res = self._doSelect(q, nodeid)
for eid,n1,n2,created,eid1,pname,intval,strval,created1 in res:
r = refs.get(eid)
if r == None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval != None:
r[3][pname] = intval
else:
r[3][pname] = strval
return refs.values()
def getRefsFromBulk(self, nids):
'''
Return a list of edges which originate with us.
Supply a list of node ids to get refs.
Example: for eid, fromid, toid, einfo in g.getRefsFromBulk(nids)
'''
q = '''
SELECT
vg_edges.eid, vg_edges.n1, vg_edges.n2,
vg_edge_props.pname, vg_edge_props.intval, vg_edge_props.strval
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n1 IN (%s) AND
vg_edges.eid = vg_edge_props.eid
'''
if not nids:
return []
refs = {}
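# expand the single %s inside IN (...) into one placeholder per nid so
# psycopg2 binds each id individually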
qend = ','.join( ['%s',] * len(nids))
q = q % qend
res = self._doSelect(q, *nids)
for eid, n1, n2, pname, intval, strval in res:
r = refs.get(eid)
if r == None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval != None:
r[3][pname] = intval
else:
r[3][pname] = strval
return refs.values()
def getRefsToBulk(self, nids):
'''
Return a list of edges which reference us (edges whose destination is in nids).
Supply a list of node ids to get refs.
Example: for eid, fromid, toid, einfo in g.getRefsToBulk(nids)
'''
q = '''
SELECT
vg_edges.eid, vg_edges.n1, vg_edges.n2,
vg_edge_props.pname, vg_edge_props.intval, vg_edge_props.strval
FROM
vg_edges,
vg_edge_props
WHERE
vg_edges.n2 IN (%s) AND
vg_edges.eid = vg_edge_props.eid
'''
if not nids:
return []
refs = {}
qend = ','.join( ['%s',] * len(nids))
q = q % qend
res = self._doSelect(q, *nids)
for eid, n1, n2, pname, intval, strval in res:
r = refs.get(eid)
if r == None:
r = (eid, n1, n2, {})
refs[eid] = r
if intval != None:
r[3][pname] = intval
else:
r[3][pname] = strval
return refs.values()
def setEdgeProp(self, eid, pname, value):
if isinstance(value, bool):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
q = 'UPDATE vg_edge_props SET intval=%s WHERE eid=%s and pname=%s RETURNING eid'
q1 = 'INSERT INTO vg_edge_props (eid, pname, intval) VALUES (%s,%s,%s)'
else:
q = 'UPDATE vg_edge_props SET strval=%s WHERE eid=%s and pname=%s RETURNING eid'
q1 = 'INSERT INTO vg_edge_props (eid, pname, strval) VALUES (%s,%s,%s)'
# return a value to see if we actually did the update...
res = self._doSelect(q, value, eid, pname)
if len(res) == 0:
self._doInsert(q1, eid, pname, value)
def getEdgeProp(self, eid, pname, default=None):
q = 'SELECT intval,strval from vg_edge_props WHERE eid=%s AND pname=%s'
res = self._doSelect(q, eid, pname)
if len(res) == 0:
return default
intval, strval = res[0]
if intval != None:
return intval
return strval
def getEdge(self, eid):
'''
Get the edge tuple ( eid, n1, n2, nprops ) for the given edge by id.
'''
q = 'SELECT eid,n1,n2 FROM vg_edges WHERE eid=%s'
res = self._doSelect( q, eid )
if not res:
raise Exception('Invalid Edge Id: %s' % eid)
e,n1,n2 = res[0]
return (eid, n1, n2, self.getEdgeProps( eid ) )
def getEdgeProps(self, eid):
'''
Retrieve the properties dictionary for the given edge id.
'''
ret = {}
q = 'SELECT pname,intval,strval FROM vg_edge_props WHERE eid=%s'
for pname,intval,strval in self._doSelect(q, eid):
if intval != None:
ret[pname] = intval
else:
ret[pname] = strval
return ret
def searchNodes(self, propname, propval=None):
'''
Return (but do not cache forward) the nids of nodes which
have a property with the given name (and optionally, value).
Example:
for nid in g.searchNodes('woot', 10):
print g.getNodeProp(nid, 'name')
NOTE: This is specific to the DbGraphStore...
'''
# build the query based on whether a value was supplied and, if so, its type
if propval == None:
q = 'SELECT nid FROM vg_node_props WHERE pname=%s'
qargs = (propname,)
elif isinstance(propval, (int, long)):
q = 'SELECT nid FROM vg_node_props WHERE pname=%s AND intval=%s'
qargs = (propname, propval)
else:
q = 'SELECT nid FROM vg_node_props WHERE pname=%s AND strval=%s'
qargs = (propname, propval)
c = self.db.cursor()
c.execute(q, qargs)
for row in c:
yield row[0]
c.close()
def buildSubGraph(self):
'''
Return a subgraph which may be used to populate from the DB and
do path searching.
'''
return DbSubGraph(self.dbinfo)
class DbSubGraph(DbGraphStore, vg_graphcore.Graph):
'''
A subgraph in the database is basically a forward cached instance of selected
nodes and edges in an in-memory graph (visgraph.graphcore.Graph). This object
may then be used for traditional path tracing without going back to the database.
Any modifications to graph element properties *will* be synchronized back to the
database backing the given subgraph.
'''
def __init__(self, dbinfo):
vg_graphcore.Graph.__init__(self)
DbGraphStore.__init__(self, dbinfo)
def addNode(self, nodeid=None, ninfo=None, **kwargs):
# Do *both*
nid = DbGraphStore.addNode(self, nodeid=nodeid, ninfo=ninfo, **kwargs)
vg_graphcore.Graph.addNode(self, nodeid=nid, ninfo=None, **kwargs)
return nid
def addEdge(self, fromid, toid, einfo):
eid = DbGraphStore.addEdge(self, fromid, toid, einfo=einfo)
vg_graphcore.Graph.addEdge(self, fromid, toid, eid=eid, einfo=None)
return eid
def useEdges(self, **kwargs):
'''
Pull some edges from the DbStore backing this subgraph into the actual
visgraph.graphcore.Graph instance so path traversal is possible.
'''
done = {}
for key,val in kwargs.items():
if type(val) in (int,long):
# FIXME is vg_edges.eid faster or vg_edge_props?
q = 'SELECT vg_edges.eid,n1,n2 FROM vg_edge_props,vg_edges WHERE pname=%s AND intval=%s AND vg_edges.eid=vg_edge_props.eid'
else:
q = 'SELECT vg_edges.eid,n1,n2 FROM vg_edge_props,vg_edges WHERE pname=%s AND strval=%s AND vg_edges.eid=vg_edge_props.eid'
for eid,n1,n2 in self._doSelect(q, key, val):
print 'using: %d (%d->%d)' % (eid, n1, n2)
done[eid] = (eid, n1, n2)
# FIXME add the nodes for these edges
for eid, n1, n2 in done.values():
if vg_graphcore.Graph.getNode(self, n1) == None:
vg_graphcore.Graph.addNode(self, nodeid=n1)
if vg_graphcore.Graph.getNode(self, n2) == None:
vg_graphcore.Graph.addNode(self, nodeid=n2)
vg_graphcore.Graph.addEdge(self, n1, n2, eid=eid)
def expandNode(self, nid, maxdepth=1):
'''
        Add *all* the edges (and adjacent nodes) by traversing this node's
        edges to the specified depth...
'''
todo = [(nid, 0),]
print 'INITIAL EXPAND',nid
if vg_graphcore.Graph.getNode(self, nid) == None:
print 'EXPANDING',nid
vg_graphcore.Graph.addNode(self, nodeid=nid)
while len(todo):
nid,depth = todo.pop()
if depth > maxdepth:
continue
# Do expansion based on the *database*
q = 'SELECT eid,n2 FROM vg_edges WHERE n1=%s'
for eid, n2 in self._doSelect(q, nid):
if vg_graphcore.Graph.getNode(self, n2) == None:
print 'EXPANDING',n2
vg_graphcore.Graph.addNode(self, nodeid=n2)
if vg_graphcore.Graph.getEdge(self, eid) == None:
vg_graphcore.Graph.addEdge(self, nid, n2, eid=eid)
ndepth = depth+1
if ndepth < maxdepth:
todo.append((n2, ndepth))
# pullNode?
# expandNode?
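# A minimal usage sketch (illustrative only; the dbinfo dict, the 'follow'
# property name and the rootnid variable below are assumptions, not part of
# this module):
#
#     store = DbGraphStore(dbinfo)          # dbinfo describes the DB connection
#     sub = store.buildSubGraph()           # forward-cached DbSubGraph
#     sub.useEdges(follow=1)                # pull edges whose 'follow' prop == 1
#     sub.expandNode(rootnid, maxdepth=2)   # cache edges out to depth 2
#     # path tracing now runs in memory via the vg_graphcore.Graph methods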
|
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nonogram (Painting by numbers) in Google CP Solver.
http://en.wikipedia.org/wiki/Nonogram
'''
Nonograms or Paint by Numbers are picture logic puzzles in which cells in a
grid have to be colored or left blank according to numbers given at the
side of the grid to reveal a hidden picture. In this puzzle type, the
numbers measure how many unbroken lines of filled-in squares there are
in any given row or column. For example, a clue of '4 8 3' would mean
there are sets of four, eight, and three filled squares, in that order,
with at least one blank square between successive groups.
'''
See problem 12 at http://www.csplib.org/.
http://www.puzzlemuseum.com/nonogram.htm
Haskell solution:
http://twan.home.fmf.nl/blog/haskell/Nonograms.details
Brunetti, Sara & Daurat, Alain (2003)
'An algorithm reconstructing convex lattice sets'
http://geodisi.u-strasbg.fr/~daurat/papiers/tomoqconv.pdf
The Comet model (http://www.hakank.org/comet/nonogram_regular.co)
was a major influence when writing this Google CP solver model.
I have also blogged about the development of a Nonogram solver in Comet
using the regular constraint.
* 'Comet: Nonogram improved: solving problem P200 from 1:30 minutes
to about 1 second'
http://www.hakank.org/constraint_programming_blog/2009/03/comet_nonogram_improved_solvin_1.html
* 'Comet: regular constraint, a much faster Nonogram with the regular
constraint,
some OPL models, and more'
http://www.hakank.org/constraint_programming_blog/2009/02/comet_regular_constraint_a_muc_1.html
Compare with the other models:
* Gecode/R: http://www.hakank.org/gecode_r/nonogram.rb (using 'regexps')
* MiniZinc: http://www.hakank.org/minizinc/nonogram_regular.mzn
* MiniZinc: http://www.hakank.org/minizinc/nonogram_create_automaton.mzn
* MiniZinc: http://www.hakank.org/minizinc/nonogram_create_automaton2.mzn
Note: nonogram_create_automaton2.mzn is the preferred model
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
#
# Make a transition (automaton) list of tuples from a
# single pattern, e.g. [3,2,1]
#
def make_transition_tuples(pattern):
p_len = len(pattern)
num_states = p_len + sum(pattern)
tuples = []
# this is for handling 0-clues. It generates
# just the minimal state
if num_states == 0:
tuples.append((1, 0, 1))
return (tuples, 1)
# convert pattern to a 0/1 pattern for easy handling of
# the states
tmp = [0]
c = 0
for pattern_index in range(p_len):
tmp.extend([1] * pattern[pattern_index])
tmp.append(0)
for i in range(num_states):
state = i + 1
if tmp[i] == 0:
tuples.append((state, 0, state))
tuples.append((state, 1, state + 1))
else:
if i < num_states - 1:
if tmp[i + 1] == 1:
tuples.append((state, 1, state + 1))
else:
tuples.append((state, 0, state + 1))
tuples.append((num_states, 0, num_states))
return (tuples, num_states)
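# Illustrative example (hand-traced, not part of the original model): the clue
# pattern [2] gives num_states = 3 and the transition tuples
#   (1, 0, 1), (1, 1, 2), (2, 1, 3), (3, 0, 3)
# i.e. state 1 absorbs leading 0s and advances on the first 1, state 2 needs a
# second 1 to reach state 3, and state 3 (the accepting state) absorbs any
# trailing 0s.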
#
# check each rule by creating an automaton and transition constraint.
#
def check_rule(rules, y):
cleaned_rule = [rules[i] for i in range(len(rules)) if rules[i] > 0]
(transition_tuples, last_state) = make_transition_tuples(cleaned_rule)
initial_state = 1
accepting_states = [last_state]
solver = y[0].solver()
solver.Add(solver.TransitionConstraint(y,
transition_tuples,
initial_state,
accepting_states))
def main(rows, row_rule_len, row_rules, cols, col_rule_len, col_rules):
# Create the solver.
solver = pywrapcp.Solver('Regular test')
#
# variables
#
board = {}
for i in range(rows):
for j in range(cols):
board[i, j] = solver.IntVar(0, 1, 'board[%i, %i]' % (i, j))
board_flat = [board[i, j] for i in range(rows) for j in range(cols)]
# Flattened board for labeling.
# This labeling was inspired by a suggestion from
# Pascal Van Hentenryck about my Comet nonogram model.
board_label = []
if rows * row_rule_len < cols * col_rule_len:
for i in range(rows):
for j in range(cols):
board_label.append(board[i, j])
else:
for j in range(cols):
for i in range(rows):
board_label.append(board[i, j])
#
# constraints
#
for i in range(rows):
check_rule(row_rules[i], [board[i, j] for j in range(cols)])
for j in range(cols):
check_rule(col_rules[j], [board[i, j] for i in range(rows)])
#
# solution and search
#
db = solver.Phase(board_label,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
print('before solver, wall time = ', solver.WallTime(), 'ms')
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print()
num_solutions += 1
for i in range(rows):
row = [board[i, j].Value() for j in range(cols)]
row_pres = []
for j in row:
if j == 1:
row_pres.append('#')
else:
row_pres.append(' ')
print(' ', ''.join(row_pres))
print()
print(' ', '-' * cols)
if num_solutions >= 2:
print('2 solutions is enough...')
break
solver.EndSearch()
print()
print('num_solutions:', num_solutions)
print('failures:', solver.Failures())
print('branches:', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
#
# Default problem
#
# From http://twan.home.fmf.nl/blog/haskell/Nonograms.details
# The lambda picture
#
rows = 12
row_rule_len = 3
row_rules = [
[0, 0, 2],
[0, 1, 2],
[0, 1, 1],
[0, 0, 2],
[0, 0, 1],
[0, 0, 3],
[0, 0, 3],
[0, 2, 2],
[0, 2, 1],
[2, 2, 1],
[0, 2, 3],
[0, 2, 2]
]
cols = 10
col_rule_len = 2
col_rules = [
[2, 1],
[1, 3],
[2, 4],
[3, 4],
[0, 4],
[0, 3],
[0, 3],
[0, 3],
[0, 2],
[0, 2]
]
if __name__ == '__main__':
if len(sys.argv) > 1:
file = sys.argv[1]
exec(compile(open(file).read(), file, 'exec'))
main(rows, row_rule_len, row_rules, cols, col_rule_len, col_rules)
|
|
#!/usr/bin/env python3
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates several files used by the size trybot to monitor size regressions."""
import argparse
import collections
import json
import logging
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'libsupersize'))
import archive
import diagnose_bloat
import diff
import describe
import file_format
import models
_RESOURCE_SIZES_LOG = 'resource_sizes_log'
_BASE_RESOURCE_SIZES_LOG = 'base_resource_sizes_log'
_MUTABLE_CONSTANTS_LOG = 'mutable_constants_log'
_FOR_TESTING_LOG = 'for_test_log'
_DEX_SYMBOLS_LOG = 'dex_symbols_log'
_SIZEDIFF_FILENAME = 'supersize_diff.sizediff'
_HTML_REPORT_URL = (
'https://chrome-supersize.firebaseapp.com/viewer.html?load_url={{' +
_SIZEDIFF_FILENAME + '}}')
_MAX_DEX_METHOD_COUNT_INCREASE = 50
_MAX_NORMALIZED_INCREASE = 16 * 1024
_MAX_PAK_INCREASE = 1024
_PROGUARD_CLASS_MAPPING_RE = re.compile(r'(?P<original_name>[^ ]+)'
r' -> '
r'(?P<obfuscated_name>[^:]+):')
_PROGUARD_FIELD_MAPPING_RE = re.compile(r'(?P<type>[^ ]+) '
r'(?P<original_name>[^ (]+)'
r' -> '
r'(?P<obfuscated_name>[^:]+)')
_PROGUARD_METHOD_MAPPING_RE = re.compile(
# line_start:line_end: (optional)
r'((?P<line_start>\d+):(?P<line_end>\d+):)?'
r'(?P<return_type>[^ ]+)' # original method return type
# original method class name (if exists)
r' (?:(?P<original_method_class>[a-zA-Z_\d.$]+)\.)?'
r'(?P<original_method_name>[^.\(]+)'
r'\((?P<params>[^\)]*)\)' # original method params
r'(?:[^ ]*)' # original method line numbers (ignored)
r' -> '
r'(?P<obfuscated_name>.+)') # obfuscated method name
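# Illustrative (made-up) mapping lines that the regular expressions above are
# intended to match; they are not taken from a real ProGuard mapping file:
#   com.example.Foo -> a.b:                (class mapping)
#   int mCountForTest -> c                 (field mapping)
#   1:1:void setUpForTest() -> d           (method mapping)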
class _SizeDelta(collections.namedtuple(
'SizeDelta', ['name', 'units', 'expected', 'actual'])):
@property
def explanation(self):
ret = '{}: {} {} (max is {} {})'.format(
self.name, self.actual, self.units, self.expected, self.units)
return ret
def IsAllowable(self):
return self.actual <= self.expected
def IsLargeImprovement(self):
return (self.actual * -1) >= self.expected
def __lt__(self, other):
return self.name < other.name
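# For example (hypothetical numbers), _SizeDelta('Dex Methods Count', 'methods',
# 50, 8).explanation renders as 'Dex Methods Count: 8 methods (max is 50 methods)'
# and IsAllowable() returns True since 8 <= 50.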
def _SymbolDiffHelper(title_fragment, symbols):
added = symbols.WhereDiffStatusIs(models.DIFF_STATUS_ADDED)
removed = symbols.WhereDiffStatusIs(models.DIFF_STATUS_REMOVED)
both = (added + removed).SortedByName()
lines = []
if len(both) > 0:
for group in both.GroupedByContainer():
counts = group.CountsByDiffStatus()
lines += [
'===== {} Added & Removed ({}) ====='.format(
title_fragment, group.full_name),
'Added: {}'.format(counts[models.DIFF_STATUS_ADDED]),
'Removed: {}'.format(counts[models.DIFF_STATUS_REMOVED]),
''
]
lines.extend(describe.GenerateLines(group, summarize=False))
lines += ['']
return lines, len(added) - len(removed)
def _CreateMutableConstantsDelta(symbols):
symbols = symbols.WhereInSection('d').WhereNameMatches(r'\bk[A-Z]|\b[A-Z_]+$')
lines, net_added = _SymbolDiffHelper('Mutable Constants', symbols)
return lines, _SizeDelta('Mutable Constants', 'symbols', 0, net_added)
def _CreateMethodCountDelta(symbols):
symbols = symbols.WhereIsOnDemand(False)
method_symbols = symbols.WhereInSection(models.SECTION_DEX_METHOD)
method_lines, net_method_added = _SymbolDiffHelper('Methods', method_symbols)
class_symbols = symbols.WhereInSection(
models.SECTION_DEX).WhereNameMatches('#').Inverted()
class_lines, _ = _SymbolDiffHelper('Classes', class_symbols)
lines = []
if class_lines:
lines.extend(class_lines)
lines.extend(['', '']) # empty lines added for clarity
if method_lines:
lines.extend(method_lines)
return lines, _SizeDelta('Dex Methods Count', 'methods',
_MAX_DEX_METHOD_COUNT_INCREASE, net_method_added)
def _CreateResourceSizesDelta(before_dir, after_dir):
sizes_diff = diagnose_bloat.ResourceSizesDiff()
sizes_diff.ProduceDiff(before_dir, after_dir)
return sizes_diff.Summary(), _SizeDelta(
'Normalized APK Size', 'bytes', _MAX_NORMALIZED_INCREASE,
sizes_diff.summary_stat.value)
def _CreateBaseModuleResourceSizesDelta(before_dir, after_dir):
sizes_diff = diagnose_bloat.ResourceSizesDiff(include_sections=['base'])
sizes_diff.ProduceDiff(before_dir, after_dir)
return sizes_diff.DetailedResults(), _SizeDelta(
'Base Module Size', 'bytes', _MAX_NORMALIZED_INCREASE,
sizes_diff.CombinedSizeChangeForSection('base'))
def _CreateSupersizeDiff(main_file_name, before_dir, after_dir, review_subject,
review_url):
before_size_path = os.path.join(before_dir, main_file_name + '.size')
after_size_path = os.path.join(after_dir, main_file_name + '.size')
before = archive.LoadAndPostProcessSizeInfo(before_size_path)
after = archive.LoadAndPostProcessSizeInfo(after_size_path)
if review_subject:
after.build_config[models.BUILD_CONFIG_TITLE] = review_subject
if review_url:
after.build_config[models.BUILD_CONFIG_URL] = review_url
size_info_delta = diff.Diff(before, after, sort=True)
lines = list(describe.GenerateLines(size_info_delta))
return lines, size_info_delta
def _CreateUncompressedPakSizeDeltas(symbols):
pak_symbols = symbols.Filter(lambda s:
s.size > 0 and
bool(s.flags & models.FLAG_UNCOMPRESSED) and
s.section_name == models.SECTION_PAK_NONTRANSLATED)
return [
_SizeDelta('Uncompressed Pak Entry "{}"'.format(pak.full_name), 'bytes',
_MAX_PAK_INCREASE, pak.after_symbol.size)
for pak in pak_symbols
]
def _ExtractForTestingSymbolsFromSingleMapping(mapping_path):
with open(mapping_path) as f:
proguard_mapping_lines = f.readlines()
current_class_orig = None
for line in proguard_mapping_lines:
if line.isspace():
continue
if not line.startswith(' '):
match = _PROGUARD_CLASS_MAPPING_RE.search(line)
if match is None:
raise Exception('Malformed class mapping')
current_class_orig = match.group('original_name')
continue
assert current_class_orig is not None
line = line.strip()
match = _PROGUARD_METHOD_MAPPING_RE.search(line)
if (match is not None
and match.group('original_method_name').find('ForTest') > -1):
method_symbol = '{}#{}'.format(
match.group('original_method_class') or current_class_orig,
match.group('original_method_name'))
yield method_symbol
match = _PROGUARD_FIELD_MAPPING_RE.search(line)
if (match is not None
and match.group('original_name').find('ForTest') > -1):
field_symbol = '{}#{}'.format(current_class_orig,
match.group('original_name'))
yield field_symbol
def _ExtractForTestingSymbolsFromMappings(mapping_paths):
symbols = set()
for mapping_path in mapping_paths:
symbols.update(_ExtractForTestingSymbolsFromSingleMapping(mapping_path))
return symbols
def _CreateTestingSymbolsDeltas(before_mapping_paths, after_mapping_paths):
before_symbols = _ExtractForTestingSymbolsFromMappings(before_mapping_paths)
after_symbols = _ExtractForTestingSymbolsFromMappings(after_mapping_paths)
added_symbols = list(after_symbols.difference(before_symbols))
removed_symbols = list(before_symbols.difference(after_symbols))
lines = []
if added_symbols:
lines.append('Added Symbols Named "ForTest"')
lines.extend(added_symbols)
lines.extend(['', '']) # empty lines added for clarity
if removed_symbols:
lines.append('Removed Symbols Named "ForTest"')
lines.extend(removed_symbols)
lines.extend(['', '']) # empty lines added for clarity
return lines, _SizeDelta('Added symbols named "ForTest"', 'symbols', 0,
len(added_symbols) - len(removed_symbols))
def _GenerateBinarySizePluginDetails(metrics):
binary_size_listings = []
for delta, log_name in metrics:
# Only show the base module delta if it is significant.
if (log_name == _BASE_RESOURCE_SIZES_LOG and delta.IsAllowable()
and not delta.IsLargeImprovement()):
continue
listing = {
'name': delta.name,
'delta': '{} {}'.format(_FormatNumber(delta.actual), delta.units),
'limit': '{} {}'.format(_FormatNumber(delta.expected), delta.units),
'log_name': log_name,
'allowed': delta.IsAllowable(),
'large_improvement': delta.IsLargeImprovement(),
}
if log_name == _RESOURCE_SIZES_LOG:
listing['name'] = 'Android Binary Size'
binary_size_listings.insert(0, listing)
continue
# The main 'binary size' delta is always shown even if unchanged.
if delta.actual == 0:
continue
binary_size_listings.append(listing)
binary_size_extras = [
{
'text': 'APK Breakdown',
'url': _HTML_REPORT_URL
},
]
return {
'listings': binary_size_listings,
'extras': binary_size_extras,
}
def _FormatNumber(number):
# Adds a sign for positive numbers and puts commas in large numbers
return '{:+,}'.format(number)
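# Illustrative: _FormatNumber(16384) -> '+16,384'; _FormatNumber(-2048) -> '-2,048'.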
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--author', required=True, help='CL author')
parser.add_argument('--review-subject', help='Review subject')
parser.add_argument('--review-url', help='Review URL')
parser.add_argument('--size-config-json-name',
required=True,
help='Filename of JSON with configs for '
'binary size measurement.')
parser.add_argument(
'--before-dir',
required=True,
help='Directory containing the APK from reference build.')
parser.add_argument(
'--after-dir',
required=True,
help='Directory containing APK for the new build.')
parser.add_argument(
'--results-path',
required=True,
help='Output path for the trybot result .json file.')
parser.add_argument(
'--staging-dir',
required=True,
help='Directory to write summary files to.')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.INFO)
to_before_path = lambda p: os.path.join(args.before_dir, os.path.basename(p))
to_after_path = lambda p: os.path.join(args.after_dir, os.path.basename(p))
with open(to_after_path(args.size_config_json_name), 'rt') as fh:
config = json.load(fh)
supersize_input_name = os.path.basename(config['supersize_input_file'])
before_mapping_paths = [to_before_path(f) for f in config['mapping_files']]
after_mapping_paths = [to_after_path(f) for f in config['mapping_files']]
logging.info('Creating Supersize diff')
supersize_diff_lines, delta_size_info = _CreateSupersizeDiff(
supersize_input_name, args.before_dir, args.after_dir,
args.review_subject, args.review_url)
changed_symbols = delta_size_info.raw_symbols.WhereDiffStatusIs(
models.DIFF_STATUS_UNCHANGED).Inverted()
# Monitor dex method count since the "multidex limit" is a thing.
logging.info('Checking dex symbols')
dex_delta_lines, dex_delta = _CreateMethodCountDelta(changed_symbols)
size_deltas = {dex_delta}
metrics = {(dex_delta, _DEX_SYMBOLS_LOG)}
# Look for native symbols called "kConstant" that are not actually constants.
# C++ syntax makes this an easy mistake, and having symbols in .data uses more
# RAM than symbols in .rodata (at least for multi-process apps).
logging.info('Checking for mutable constants in native symbols')
mutable_constants_lines, mutable_constants_delta = (
_CreateMutableConstantsDelta(changed_symbols))
size_deltas.add(mutable_constants_delta)
metrics.add((mutable_constants_delta, _MUTABLE_CONSTANTS_LOG))
# Look for symbols with 'ForTest' in their name.
logging.info('Checking for DEX symbols named "ForTest"')
testing_symbols_lines, test_symbols_delta = (_CreateTestingSymbolsDeltas(
before_mapping_paths, after_mapping_paths))
size_deltas.add(test_symbols_delta)
metrics.add((test_symbols_delta, _FOR_TESTING_LOG))
# Check for uncompressed .pak file entries being added to avoid unnecessary
# bloat.
logging.info('Checking pak symbols')
size_deltas.update(_CreateUncompressedPakSizeDeltas(changed_symbols))
# Normalized APK Size is the main metric we use to monitor binary size.
logging.info('Creating sizes diff')
resource_sizes_lines, resource_sizes_delta = (_CreateResourceSizesDelta(
args.before_dir, args.after_dir))
size_deltas.add(resource_sizes_delta)
metrics.add((resource_sizes_delta, _RESOURCE_SIZES_LOG))
logging.info('Creating base module sizes diff')
base_resource_sizes_lines, base_resource_sizes_delta = (
_CreateBaseModuleResourceSizesDelta(args.before_dir, args.after_dir))
size_deltas.add(base_resource_sizes_delta)
metrics.add((base_resource_sizes_delta, _BASE_RESOURCE_SIZES_LOG))
# .sizediff can be consumed by the html viewer.
logging.info('Creating HTML Report')
sizediff_path = os.path.join(args.staging_dir, _SIZEDIFF_FILENAME)
file_format.SaveDeltaSizeInfo(delta_size_info, sizediff_path)
passing_deltas = set(d for d in size_deltas if d.IsAllowable())
failing_deltas = size_deltas - passing_deltas
is_roller = '-autoroll' in args.author
failing_checks_text = '\n'.join(d.explanation for d in sorted(failing_deltas))
passing_checks_text = '\n'.join(d.explanation for d in sorted(passing_deltas))
checks_text = """\
FAILING Checks:
{}
PASSING Checks:
{}
To understand what those checks are and how to pass them, see:
https://chromium.googlesource.com/chromium/src/+/main/docs/speed/binary_size/android_binary_size_trybot.md
""".format(failing_checks_text, passing_checks_text)
status_code = int(bool(failing_deltas))
# Give rollers a free pass, except for mutable constants.
# Mutable constants are rare, and other regressions are generally noticed in
# size graphs and can be investigated after-the-fact.
if is_roller and mutable_constants_delta not in failing_deltas:
status_code = 0
summary = '<br>' + checks_text.replace('\n', '<br>')
links_json = [
{
'name': 'Binary Size Details',
'lines': resource_sizes_lines,
'log_name': _RESOURCE_SIZES_LOG,
},
{
'name': 'Base Module Binary Size Details',
'lines': base_resource_sizes_lines,
'log_name': _BASE_RESOURCE_SIZES_LOG,
},
{
'name': 'Mutable Constants Diff',
'lines': mutable_constants_lines,
'log_name': _MUTABLE_CONSTANTS_LOG,
},
{
'name': 'ForTest Symbols Diff',
'lines': testing_symbols_lines,
'log_name': _FOR_TESTING_LOG,
},
{
'name': 'Dex Class and Method Diff',
'lines': dex_delta_lines,
'log_name': _DEX_SYMBOLS_LOG,
},
{
'name': 'SuperSize Text Diff',
'lines': supersize_diff_lines,
},
{
'name': 'SuperSize HTML Diff',
'url': _HTML_REPORT_URL,
},
]
# Remove empty diffs (Mutable Constants, Dex Method, ...).
links_json = [o for o in links_json if o.get('lines') or o.get('url')]
binary_size_plugin_json = _GenerateBinarySizePluginDetails(metrics)
results_json = {
'status_code': status_code,
'summary': summary,
'archive_filenames': [_SIZEDIFF_FILENAME],
'links': links_json,
'gerrit_plugin_details': binary_size_plugin_json,
}
with open(args.results_path, 'w') as f:
json.dump(results_json, f)
if __name__ == '__main__':
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .TProtocol import TProtocolBase, TProtocolException
from thrift.Thrift import TApplicationException, TMessageType
from .TBinaryProtocol import TBinaryProtocolAccelerated
from .TCompactProtocol import TCompactProtocolAccelerated
from thrift.transport.THeaderTransport import THeaderTransport, CLIENT_TYPE
class THeaderProtocol(TProtocolBase):
"""Pass through header protocol (transport can set)"""
T_BINARY_PROTOCOL = 0
T_JSON_PROTOCOL = 1
T_COMPACT_PROTOCOL = 2
__proto = None
__proto_id = None
def get_protocol_id(self):
return self.__proto_id
def reset_protocol(self):
if self.__proto_id == self.trans.get_protocol_id():
return
proto_id = self.trans.get_protocol_id()
if proto_id == self.T_BINARY_PROTOCOL:
self.__proto = TBinaryProtocolAccelerated(self.trans,
self.strictRead, True)
elif proto_id == self.T_COMPACT_PROTOCOL:
self.__proto = TCompactProtocolAccelerated(self.trans)
else:
raise TApplicationException(TProtocolException.INVALID_PROTOCOL,
"Unknown protocol requested")
self.__proto_id = proto_id
def __init__(self, trans, strictRead=False,
client_types=None, client_type=None):
"""Create a THeaderProtocol instance
        @param trans(TTransport) The underlying transport.
@param strictRead(bool) Turn on strictRead if using TBinaryProtocol
@param client_types([CLIENT_TYPE.HEADER, ...])
List of client types to support. Defaults to
CLIENT_TYPE.HEADER only.
"""
if isinstance(trans, THeaderTransport):
trans._THeaderTransport__supported_client_types = set(
client_types or (CLIENT_TYPE.HEADER,))
if client_type is not None:
trans._THeaderTransport__client_type = client_type
htrans = trans
else:
htrans = THeaderTransport(trans, client_types, client_type)
TProtocolBase.__init__(self, htrans)
self.strictRead = strictRead
self.reset_protocol()
def writeMessageBegin(self, name, type, seqid):
self.__proto.writeMessageBegin(name, type, seqid)
if type == TMessageType.CALL or type == TMessageType.ONEWAY:
            # All client-to-server comms should have a unique seq_id in HEADER
self.trans.seq_id = seqid
def writeMessageEnd(self):
self.__proto.writeMessageEnd()
def writeStructBegin(self, name):
self.__proto.writeStructBegin(name)
def writeStructEnd(self):
self.__proto.writeStructEnd()
def writeFieldBegin(self, name, type, id):
self.__proto.writeFieldBegin(name, type, id)
def writeFieldEnd(self):
self.__proto.writeFieldEnd()
def writeFieldStop(self):
self.__proto.writeFieldStop()
def writeMapBegin(self, ktype, vtype, size):
self.__proto.writeMapBegin(ktype, vtype, size)
def writeMapEnd(self):
self.__proto.writeMapEnd()
def writeListBegin(self, etype, size):
self.__proto.writeListBegin(etype, size)
def writeListEnd(self):
self.__proto.writeListEnd()
def writeSetBegin(self, etype, size):
self.__proto.writeSetBegin(etype, size)
def writeSetEnd(self):
self.__proto.writeSetEnd()
def writeBool(self, bool):
self.__proto.writeBool(bool)
def writeByte(self, byte):
self.__proto.writeByte(byte)
def writeI16(self, i16):
self.__proto.writeI16(i16)
def writeI32(self, i32):
self.__proto.writeI32(i32)
def writeI64(self, i64):
self.__proto.writeI64(i64)
def writeDouble(self, dub):
self.__proto.writeDouble(dub)
def writeFloat(self, flt):
self.__proto.writeFloat(flt)
def writeString(self, str):
self.__proto.writeString(str)
def readMessageBegin(self):
        # Read the next frame, and change protocols if needed
try:
self.trans._reset_protocol()
self.reset_protocol()
except TApplicationException as ex:
if self.__proto:
self.writeMessageBegin(b"", TMessageType.EXCEPTION, 0)
ex.write(self)
self.writeMessageEnd()
self.trans.flush()
return self.__proto.readMessageBegin()
def readMessageEnd(self):
return self.__proto.readMessageEnd()
def readStructBegin(self):
return self.__proto.readStructBegin()
def readStructEnd(self):
return self.__proto.readStructEnd()
def readFieldBegin(self):
return self.__proto.readFieldBegin()
def readFieldEnd(self):
return self.__proto.readFieldEnd()
def readMapBegin(self):
return self.__proto.readMapBegin()
def readMapEnd(self):
return self.__proto.readMapEnd()
def readListBegin(self):
return self.__proto.readListBegin()
def readListEnd(self):
return self.__proto.readListEnd()
def readSetBegin(self):
return self.__proto.readSetBegin()
def readSetEnd(self):
return self.__proto.readSetEnd()
def readBool(self):
return self.__proto.readBool()
def readByte(self):
return self.__proto.readByte()
def readI16(self):
return self.__proto.readI16()
def readI32(self):
return self.__proto.readI32()
def readI64(self):
return self.__proto.readI64()
def readDouble(self):
return self.__proto.readDouble()
def readFloat(self):
return self.__proto.readFloat()
def readString(self):
return self.__proto.readString()
class THeaderProtocolFactory(object):
def __init__(self, strictRead=False, client_types=None, client_type=None):
self.strictRead = strictRead
self.client_types = client_types
self.client_type = client_type
def getProtocol(self, trans):
prot = THeaderProtocol(trans, self.strictRead, self.client_types,
self.client_type)
return prot
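# A minimal usage sketch (illustrative; the TSocket transport shown here is an
# assumption -- any TTransport will do):
#
#   from thrift.transport import TSocket
#   sock = TSocket.TSocket('localhost', 9090)
#   proto = THeaderProtocolFactory().getProtocol(sock)
#
# getProtocol() wraps a non-THeaderTransport transport in THeaderTransport, and
# reset_protocol() then picks the concrete wire protocol (binary or compact)
# from the transport's protocol id when reading each message.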
|
|
from datetime import datetime
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from mock import ANY, Mock, patch
from nose.tools import eq_, ok_, raises
from amo.tests import app_factory, TestCase
from constants.payments import PROVIDER_BANGO, PROVIDER_REFERENCE
from mkt.developers.models import PaymentAccount, SolitudeSeller
from mkt.developers.providers import Bango, get_provider, Reference
from mkt.site.fixtures import fixture
from users.models import UserProfile
class Patcher(object):
"""
    This class patches your test case so that any attempt to call solitude
    from zamboni through these classes will use the mock.
    Use this class as a mixin on any tests that alter payment accounts.
    If you override setUp or tearDown, be sure to call super.
"""
def setUp(self, *args, **kw):
super(Patcher, self).setUp(*args, **kw)
# Once everything has moved over to the provider, this one
        # can be removed.
client_patcher = patch('mkt.developers.models.client',
name='test_providers.Patcher.client_patcher')
self.patched_client = client_patcher.start()
self.patched_client.patcher = client_patcher
self.addCleanup(client_patcher.stop)
bango_patcher = patch('mkt.developers.providers.Bango.client',
name='test_providers.Patcher.bango_patcher')
self.bango_patcher = bango_patcher.start()
self.bango_patcher.patcher = bango_patcher
self.addCleanup(bango_patcher.stop)
bango_p_patcher = patch(
'mkt.developers.providers.Bango.client_provider',
name='test_providers.Patcher.bango_p_patcher')
self.bango_p_patcher = bango_p_patcher.start()
self.bango_p_patcher.patcher = bango_p_patcher
self.addCleanup(bango_p_patcher.stop)
ref_patcher = patch('mkt.developers.providers.Reference.client',
name='test_providers.Patcher.ref_patcher')
self.ref_patcher = ref_patcher.start()
self.ref_patcher.patcher = ref_patcher
self.addCleanup(ref_patcher.stop)
generic_patcher = patch('mkt.developers.providers.Provider.generic',
name='test_providers.Patcher.generic_patcher')
self.generic_patcher = generic_patcher.start()
self.generic_patcher.patcher = generic_patcher
self.addCleanup(generic_patcher.stop)
class TestSetup(TestCase):
@raises(ImproperlyConfigured)
def test_multiple(self):
with self.settings(PAYMENT_PROVIDERS=['foo', 'bar']):
get_provider()
class TestBango(Patcher, TestCase):
fixtures = fixture('user_999')
def setUp(self):
super(TestBango, self).setUp()
self.user = UserProfile.objects.filter()[0]
self.app = app_factory()
self.make_premium(self.app)
self.seller = SolitudeSeller.objects.create(
resource_uri='sellerres', user=self.user
)
self.account = PaymentAccount.objects.create(
solitude_seller=self.seller,
user=self.user, name='paname', uri='acuri',
inactive=False, seller_uri='selluri',
account_id=123, provider=PROVIDER_BANGO
)
self.bango = Bango()
def test_create(self):
self.generic_patcher.product.get_object_or_404.return_value = {
'resource_uri': 'gpuri'}
self.bango_patcher.product.get_object_or_404.return_value = {
'resource_uri': 'bpruri', 'bango_id': 'bango#', 'seller': 'selluri'
}
uri = self.bango.product_create(self.account, self.app)
eq_(uri, 'bpruri')
def test_create_new(self):
self.bango_patcher.product.get_object_or_404.side_effect = (
ObjectDoesNotExist)
self.bango_p_patcher.product.post.return_value = {
'resource_uri': '', 'bango_id': 1
}
self.bango.product_create(self.account, self.app)
ok_('packageId' in
self.bango_p_patcher.product.post.call_args[1]['data'])
def test_terms_bleached(self):
self.bango_patcher.sbi.agreement.get_object.return_value = {
'text': '<script>foo</script><h3></h3>'}
eq_(self.bango.terms_retrieve(Mock())['text'],
u'<script>foo</script><h3></h3>')
class TestReference(Patcher, TestCase):
fixtures = fixture('user_999')
def setUp(self, *args, **kw):
super(TestReference, self).setUp(*args, **kw)
self.user = UserProfile.objects.get(pk=999)
self.ref = Reference()
def test_setup_seller(self):
self.ref.setup_seller(self.user)
ok_(SolitudeSeller.objects.filter(user=self.user).exists())
def test_account_create(self):
data = {'account_name': 'account', 'name': 'f', 'email': 'a@a.com'}
res = self.ref.account_create(self.user, data)
acct = PaymentAccount.objects.get(user=self.user)
eq_(acct.provider, PROVIDER_REFERENCE)
eq_(res.pk, acct.pk)
self.ref_patcher.sellers.post.assert_called_with(data={
'status': 'ACTIVE',
'email': 'a@a.com',
'uuid': ANY,
'name': 'f',
})
def make_account(self):
seller = SolitudeSeller.objects.create(user=self.user)
return PaymentAccount.objects.create(user=self.user,
solitude_seller=seller,
uri='/f/b/1',
name='account name')
def test_terms_retrieve(self):
account = self.make_account()
self.ref.terms_retrieve(account)
assert self.ref_patcher.terms.called
def test_terms_bleached(self):
account = self.make_account()
account_mock = Mock()
account_mock.get.return_value = {'text':
'<script>foo</script><a>bar</a>'}
self.ref_patcher.terms.return_value = account_mock
eq_(self.ref.terms_retrieve(account)['text'],
u'<script>foo</script><a>bar</a>')
def test_terms_update(self):
seller_mock = Mock()
seller_mock.get.return_value = {
'id': 1,
'resource_uri': '/a/b/c',
'resource_name': 'x',
'initial_field': u'initial content',
}
seller_mock.put.return_value = {}
self.ref_patcher.sellers.return_value = seller_mock
account = self.make_account()
self.ref.terms_update(account)
eq_(account.reload().agreed_tos, True)
assert self.ref_patcher.sellers.called
seller_mock.get.assert_called_with()
seller_mock.put.assert_called_with({
'agreement': datetime.now().strftime('%Y-%m-%d'),
'initial_field': u'initial content',
})
def test_account_retrieve(self):
account = self.make_account()
acc = self.ref.account_retrieve(account)
eq_(acc, {'account_name': 'account name'})
assert self.ref_patcher.sellers.called
def test_account_update(self):
account_data = {
'status': '',
'resource_name': 'sellers',
'uuid': 'custom-uuid',
'agreement': '',
'email': 'a@a.com',
'id': 'custom-uuid',
'resource_uri': '/provider/reference/sellers/custom-uuid/',
'account_name': u'Test',
'name': 'Test',
}
seller_mock = Mock()
seller_mock.get.return_value = account_data
self.ref_patcher.sellers.return_value = seller_mock
account = self.make_account()
self.ref.account_update(account, account_data)
eq_(self.ref.forms['account']().hidden_fields()[0].name, 'uuid')
eq_(account.reload().name, 'Test')
seller_mock.put.assert_called_with(account_data)
def test_product_create_exists(self):
self.ref_patcher.products.get.return_value = [{'resource_uri': '/f'}]
account = self.make_account()
app = app_factory()
self.ref.product_create(account, app)
# Product should have been got from zippy, but not created by a post.
assert self.ref_patcher.products.get.called
@raises(ValueError)
def test_product_mulitple(self):
self.ref_patcher.products.get.return_value = [{}, {}]
account = self.make_account()
app = app_factory()
self.ref.product_create(account, app)
def test_product_create_not(self):
self.generic_patcher.product.get_object_or_404.return_value = {
'external_id': 'ext'}
self.ref_patcher.products.get.return_value = []
self.ref_patcher.products.post.return_value = {'resource_uri': '/f'}
account = self.make_account()
app = app_factory()
self.ref.product_create(account, app)
self.ref_patcher.products.get.assert_called_with(
seller_id='1', external_id='ext')
self.ref_patcher.products.post.assert_called_with(data={
'seller_id': '1',
'external_id': 'ext',
'name': unicode(app.name),
'uuid': ANY,
})
|
|
'''Contains the DStoken class for the DateSense package.'''
# Used by the parser for keeping track of what goes where, and what can possibly go where
class DStoken(object):
'''DStoken objects are used by the parser for keeping track of what
goes where in tokenized date strings, and what can possibly go where
in format strings as determined by a DSoptions options.
In the context of tokenized date strings: each DStoken object
contains information on what's actually there in the string.
In the context of DSoptions allowed lists: each DStoken object
contains information on what directives (or non-directive strings)
are allowed to go where, and how likely the parser thinks each must
be. (The higher a DStoken's score, the more likely it's considered
to be.)
'''
# Consts for the different kinds of tokens recognized
KIND_DECORATOR = 0
KIND_NUMBER = 1
KIND_WORD = 2
KIND_TIMEZONE = 3
# Const for number of characters timezone offset numbers are expected to be, e.g. +0100 or -0300 (You definitely want this value to be 4.)
TIMEZONE_LENGTH = 4
# Constructors
def __init__(self, kind, text, option=None):
'''Constructs a DStoken object.
You probably want to be using the kind-specific constructors
instead of this one: create_decorator, create_number,
create_word, and create_timezone.
Returns the DStoken object.
:param kind: The DStoken kind, which should be one of
DStoken.KIND_DECORATOR, DStoken.KIND_NUMBER,
DStoken.KIND_WORD, DStoken.KIND_TIMEZONE.
        :param text: A string to associate with this object.
:param option: (optional) A NumOption or WordOption object to
associate with this object. Defaults to None.
'''
self.kind = kind
self.text = text
self.option = option
if option:
self.score = option.common
else:
self.score = 0
@staticmethod
def create_decorator(text):
'''Constructs a DStoken object for a decorator token.
Decorator token possibilities are those which correspond to
no directive. For example, the ':' characters in '%H:%M:%S'.
Returns the DStoken object.
        :param text: A string to associate with this object.
'''
return DStoken(DStoken.KIND_DECORATOR, text, None)
@staticmethod
def create_number(option):
'''Constructs a DStoken object for a numeric token.
Returns the DStoken object.
        :param option: A directive option to associate with this
object and to derive its text attribute from. Should be a
NumOption object.
'''
return DStoken(DStoken.KIND_NUMBER, option.directive, option)
@staticmethod
def create_word(option):
'''Constructs a DStoken object for an alphabetical token.
Returns the DStoken object.
        :param option: A directive option to associate with this
object and to derive its text attribute from. Should be a
WordOption object.
'''
return DStoken(DStoken.KIND_WORD, option.directive, option)
@staticmethod
def create_timezone(directive):
'''Constructs a DStoken object for a timezone offset token.
Returns the DStoken object.
        :param directive: A string to associate with this object.
(You probably want this to be '%z'.)
'''
return DStoken(DStoken.KIND_TIMEZONE, directive, None)
# Get a string representation
def __str__(self):
kinds = "dec", "num", "word", "tz"
return kinds[self.kind] + ":'" + self.text + "'(" + str(self.score) + ")"
def __repr__(self):
return self.__str__()
# Simple methods for checking whether the token is a specific kind
def is_decorator(self):
'''Returns true if the DStoken is for a decorator, false otherwise.'''
return self.kind == DStoken.KIND_DECORATOR
def is_number(self):
'''Returns true if the DStoken is for a numeric directive, false otherwise.'''
return self.kind == DStoken.KIND_NUMBER
def is_word(self):
'''Returns true if the DStoken is for an alphabetical directive, false otherwise.'''
return self.kind == DStoken.KIND_WORD
def is_timezone(self):
'''Returns true if the DStoken is for a timezone offset directive, false otherwise.'''
return self.kind == DStoken.KIND_TIMEZONE
@staticmethod
def tokenize_date(date_string):
'''Tokenizes a date string.
Tokens are divided on the basis of whether each character is
a letter, a digit, or neither. (With some special handling to
combine tokens like '+' and '0100' or '-' and '0300' into one
timezone offset token.) For example, the string '12 34Abc?+1000'
would be tokenized like so: '12', ' ', '34', 'Abc', '?', '+1000'.
Returns a list of DStoken objects.
:param date_string: The date string to be tokenized.
'''
current_text = ''
current_kind = -1
tokens = []
# Iterate through characters in the date string, divide into tokens and assign token kinds.
# Digits become number tokens, letters become word tokens, four-digit numbers preceded by '+' or '-' become timezone tokens. Everything else becomes decorator tokens.
for char in date_string:
asc = ord(char)
is_digit = (asc>=48 and asc<=57) # 0-9
is_alpha = (asc>=97 and asc<=122) or (asc>=65 and asc<=90) # a-zA-Z
is_tzoff = (asc==43 or asc==45) # +|-
tokkind = DStoken.KIND_NUMBER*is_digit + DStoken.KIND_WORD*is_alpha + DStoken.KIND_TIMEZONE*is_tzoff
if tokkind == current_kind and current_kind != DStoken.KIND_TIMEZONE:
current_text += char
else:
if current_text:
tokens.append(DStoken(current_kind, current_text))
current_kind = tokkind
current_text = char
if current_text:
tokens.append(DStoken(current_kind, current_text))
# Additional pass for handling timezone tokens
rettokens = []
skip = False
tokens_count = len(tokens)
for i in range(0,tokens_count):
if skip:
skip = False
else:
tok = tokens[i]
if tok.is_timezone():
tokprev = tokens[i-1] if (i > 0) else None
toknext = tokens[i+1] if (i < tokens_count-1) else None
check_prev = (not tokprev) or not (tokprev.is_number() or tokprev.is_timezone())
check_next = toknext and toknext.is_number() and (len(toknext.text) == DStoken.TIMEZONE_LENGTH)
if check_prev and check_next:
tok.text += toknext.text
skip = True
else:
tok.kind = DStoken.KIND_DECORATOR
rettokens.append(tok)
# All done!
return rettokens
# Convenience functions for doing useful operations on sets of token possibilities
@staticmethod
def get_token_with_text(toklist, text):
'''Returns the first token in a set matching the specified text.
:param toklist: A list of DStoken objects.
:param text: The text to search for.
'''
for tok in toklist:
if tok.text in text:
return tok
return None
@staticmethod
def get_max_score(toklist):
'''Returns the highest-scoring token in a set.
In case of a tie, the lowest-index token will be returned.
:param toklist: A list of DStoken objects.
'''
high = None
for tok in toklist:
if (not high) or tok.score > high.score:
high = tok
return high
@staticmethod
def get_all_max_score(toklist):
'''Returns a list of the highest-scoring tokens in a set.'''
high = []
for tok in toklist:
if (not high) or tok.score > high[0].score:
high = [tok]
elif tok.score == high[0].score:
high.append(tok)
return high
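# Illustrative example (hand-traced, not part of the original module):
#   DStoken.tokenize_date('12:34 PM')
# returns five tokens whose repr() reads
#   [num:'12'(0), dec:':'(0), num:'34'(0), dec:' '(0), word:'PM'(0)]
# (all scores are 0 because no NumOption/WordOption has been attached yet).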
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
from blinkpy.web_tests.layout_package import bot_test_expectations
from blinkpy.web_tests.builder_list import BuilderList
class BotTestExpectationsFactoryTest(unittest.TestCase):
# pylint: disable=protected-access
def fake_builder_list(self):
return BuilderList({
'Dummy builder name': {
'master': 'dummy.master',
'port_name': 'dummy-port',
'specifiers': ['dummy', 'release'],
},
'Dummy tryserver builder name': {
'master': 'tryserver.dummy.master',
'port_name': 'dummy-port',
'specifiers': ['dummy', 'release'],
"is_try_builder": True
},
})
def fake_results_json_for_builder(self, builder):
return bot_test_expectations.ResultsJSON(builder, 'Dummy content')
def test_results_url_for_builder(self):
factory = bot_test_expectations.BotTestExpectationsFactory(
self.fake_builder_list())
self.assertEqual(
factory._results_url_for_builder('Dummy builder name'),
'https://test-results.appspot.com/testfile?testtype=blink_web_tests'
'&name=results-small.json&master=dummy.master&builder=Dummy%20builder%20name')
self.assertEqual(
factory._results_url_for_builder('Dummy tryserver builder name'),
'https://test-results.appspot.com/testfile?'
'testtype=blink_web_tests'
'&name=results-small.json&master=tryserver.dummy.master'
'&builder=Dummy%20tryserver%20builder%20name')
self.assertEqual(
factory._results_url_for_builder('Dummy tryserver builder name', True),
'https://test-results.appspot.com/testfile?'
'testtype=blink_web_tests%20%28with%20patch%29'
'&name=results-small.json&master=tryserver.dummy.master'
'&builder=Dummy%20tryserver%20builder%20name')
def test_results_url_for_builder_with_custom_step_name(self):
factory = bot_test_expectations.BotTestExpectationsFactory(
self.fake_builder_list(), 'weblayer_shell_wpt')
self.assertEqual(
factory._results_url_for_builder('Dummy builder name'),
'https://test-results.appspot.com/testfile?testtype=weblayer_shell_wpt'
'&name=results-small.json&master=dummy.master&builder=Dummy%20builder%20name')
self.assertEqual(
factory._results_url_for_builder('Dummy tryserver builder name'),
'https://test-results.appspot.com/testfile?'
'testtype=weblayer_shell_wpt'
'&name=results-small.json&master=tryserver.dummy.master'
'&builder=Dummy%20tryserver%20builder%20name')
self.assertEqual(
factory._results_url_for_builder('Dummy tryserver builder name', True),
'https://test-results.appspot.com/testfile?'
'testtype=weblayer_shell_wpt%20%28with%20patch%29'
'&name=results-small.json&master=tryserver.dummy.master'
'&builder=Dummy%20tryserver%20builder%20name')
def test_expectations_for_builder(self):
factory = bot_test_expectations.BotTestExpectationsFactory(
self.fake_builder_list())
factory._results_json_for_builder = self.fake_results_json_for_builder
self.assertIsNotNone(
factory.expectations_for_builder('Dummy builder name'))
def test_expectations_for_port(self):
factory = bot_test_expectations.BotTestExpectationsFactory(
self.fake_builder_list())
factory._results_json_for_builder = self.fake_results_json_for_builder
self.assertIsNotNone(factory.expectations_for_port('dummy-port'))
@unittest.skipIf(sys.platform == 'win32', 'fails on Windows')
class BotTestExpectationsTest(unittest.TestCase):
# FIXME: Find a way to import this map from Tools/TestResultServer/model/jsonresults.py.
FAILURE_MAP = {
'C': 'CRASH',
'F': 'FAIL',
'N': 'NO DATA',
'P': 'PASS',
'T': 'TIMEOUT',
'Y': 'NOTRUN',
'X': 'SKIP'
}
# All result_string's in this file represent retries from a single run.
# The left-most entry is the first try, the right-most is the last.
def _assert_is_flaky(self,
results_string,
should_be_flaky,
only_consider_very_flaky,
expected=None):
results_json = self._results_json_from_test_data({})
expectations = bot_test_expectations.BotTestExpectations(
results_json, BuilderList({}), set('test'))
results_entry = self._results_from_string(results_string)
if expected:
results_entry[bot_test_expectations.ResultsJSON.
EXPECTATIONS_KEY] = expected
num_actual_results = len(
expectations._flaky_types_in_results( # pylint: disable=protected-access
results_entry, only_consider_very_flaky))
if should_be_flaky:
self.assertGreater(num_actual_results, 1)
else:
self.assertLessEqual(num_actual_results, 1)
def test_basic_flaky(self):
self._assert_is_flaky(
'P', should_be_flaky=False, only_consider_very_flaky=False)
self._assert_is_flaky(
'P', should_be_flaky=False, only_consider_very_flaky=True)
self._assert_is_flaky(
'F', should_be_flaky=False, only_consider_very_flaky=False)
self._assert_is_flaky(
'F', should_be_flaky=False, only_consider_very_flaky=True)
self._assert_is_flaky(
'FP', should_be_flaky=True, only_consider_very_flaky=False)
self._assert_is_flaky(
'FP', should_be_flaky=False, only_consider_very_flaky=True)
self._assert_is_flaky(
'FFP', should_be_flaky=True, only_consider_very_flaky=False)
self._assert_is_flaky(
'FFP', should_be_flaky=True, only_consider_very_flaky=True)
self._assert_is_flaky(
'FFT', should_be_flaky=True, only_consider_very_flaky=False)
self._assert_is_flaky(
'FFT', should_be_flaky=True, only_consider_very_flaky=True)
self._assert_is_flaky(
'FFF', should_be_flaky=False, only_consider_very_flaky=False)
self._assert_is_flaky(
'FFF', should_be_flaky=False, only_consider_very_flaky=True)
self._assert_is_flaky(
'FT',
should_be_flaky=True,
only_consider_very_flaky=False,
expected='TIMEOUT')
self._assert_is_flaky(
'FT',
should_be_flaky=False,
only_consider_very_flaky=True,
expected='TIMEOUT')
self._assert_is_flaky(
'FFT',
should_be_flaky=True,
only_consider_very_flaky=False,
expected='TIMEOUT')
self._assert_is_flaky(
'FFT',
should_be_flaky=True,
only_consider_very_flaky=True,
expected='TIMEOUT')
def _results_json_from_test_data(self, test_data):
test_data[bot_test_expectations.ResultsJSON.FAILURE_MAP_KEY] = \
self.FAILURE_MAP
json_dict = {
'builder': test_data,
}
return bot_test_expectations.ResultsJSON('builder', json_dict)
def _results_filter_from_test_data(self, test_data):
json_dict = {
'builder': test_data,
}
return bot_test_expectations.ResultsFilter('builder', json_dict)
def _results_from_string(self, results_string):
return {'results': [[1, results_string]]}
def _assert_expectations(self, test_data, expectations_string,
only_consider_very_flaky, **kwargs):
results_json = self._results_json_from_test_data(test_data)
expectations = bot_test_expectations.BotTestExpectations(
results_json, BuilderList({}), set('test'))
self.assertEqual(
expectations.flakes_by_path(only_consider_very_flaky, **kwargs),
expectations_string)
def _assert_unexpected_results(self, test_data, expectations_string):
results_json = self._results_json_from_test_data(test_data)
expectations = bot_test_expectations.BotTestExpectations(
results_json, BuilderList({}), set('test'))
self.assertEqual(expectations.unexpected_results_by_path(),
expectations_string)
def test_all_results_by_path(self):
test_data = {
'tests': {
'foo': {
'multiple_pass.html': {
'results': [[4, 'P'], [1, 'P'], [2, 'P']]
},
'fail.html': {
'results': [[2, 'F']]
},
'all_types.html': {
'results': [[1, 'C'], [2, 'F'], [1, 'N'], [1, 'P'],
[1, 'T'], [1, 'Y'], [10, 'X']]
},
'not_run.html': {
'results': []
},
}
}
}
results_json = self._results_json_from_test_data(test_data)
expectations = bot_test_expectations.BotTestExpectations(
results_json, BuilderList({}), set('test'))
results_by_path = expectations.all_results_by_path()
expected_output = {
'foo/multiple_pass.html': ['PASS'],
'foo/fail.html': ['FAIL'],
'foo/all_types.html': ['CRASH', 'FAIL', 'PASS', 'TIMEOUT']
}
self.assertEqual(results_by_path, expected_output)
def test_filtered_all_results_by_path(self):
test_data = {
            'buildNumbers': [1, 2, 3, 4, 5, 6, 7],
'tests': {
'foo': {
'fail_filtered.html': {'results': [[4, 'P'], [1, 'F'], [1, 'C'], [1, 'P']]},
'fail_not_filtered.html': {'results': [[3, 'P'], [2, 'F'], [2, 'P']]},
}
}
}
filter_data = {
'buildNumbers': [3, 4, 5, 6, 8],
'num_failures_by_type' : {
'PASS' : [0, 0, 1, 1, 1, 0, 1]
}
}
results_json = self._results_json_from_test_data(test_data)
results_filter = self._results_filter_from_test_data(filter_data)
expectations = bot_test_expectations.BotTestExpectations(results_json, BuilderList({}), set('test'), results_filter)
results_by_path = expectations.all_results_by_path()
expected_output = {
'foo/fail_filtered.html': ['PASS'],
'foo/fail_not_filtered.html': ['FAIL', 'PASS'],
}
self.assertEqual(results_by_path, expected_output)
def test_basic(self):
test_data = {
'tests': {
'foo': {
'veryflaky.html': self._results_from_string('FFP'),
'maybeflaky.html': self._results_from_string('FP'),
'notflakypass.html': self._results_from_string('P'),
'notflakyfail.html': self._results_from_string('F'),
# Even if there are no expected results, it's not very flaky if it didn't do multiple retries.
# This accounts for the latest expectations not necessarily matching the expectations
# at the time of the given run.
'notverflakynoexpected.html':
self._results_from_string('FT'),
# If the test is flaky, but marked as such, it shouldn't get printed out.
'notflakyexpected.html': {
'results': [[2, 'FFFP']],
'expected': 'PASS FAIL'
},
'flakywithoutretries.html': {
'results': [[1, 'F'], [1, 'P']],
},
}
}
}
self._assert_expectations(
test_data, {
'foo/veryflaky.html': {'FAIL', 'PASS'},
},
only_consider_very_flaky=True)
self._assert_expectations(
test_data, {
'foo/veryflaky.html': {'FAIL', 'PASS'},
'foo/notverflakynoexpected.html': {'FAIL', 'TIMEOUT'},
'foo/maybeflaky.html': {'FAIL', 'PASS'},
},
only_consider_very_flaky=False)
self._assert_expectations(
test_data, {
'foo/veryflaky.html': {'FAIL', 'PASS'},
'foo/notflakyexpected.html': {'FAIL', 'PASS'},
},
only_consider_very_flaky=True, ignore_bot_expected_results=True)
self._assert_expectations(
test_data, {
'foo/veryflaky.html': {'FAIL', 'PASS'},
'foo/notflakyexpected.html': {'FAIL', 'PASS'},
'foo/flakywithoutretries.html': {'FAIL', 'PASS'},
'foo/notverflakynoexpected.html': {'FAIL', 'TIMEOUT'},
'foo/maybeflaky.html': {'FAIL', 'PASS'},
},
only_consider_very_flaky=False, ignore_bot_expected_results=True,
consider_only_flaky_runs=False)
def test_unexpected_results_no_unexpected(self):
test_data = {
'tests': {
'foo': {
'pass1.html': {
'results': [[4, 'P']]
},
'pass2.html': {
'results': [[2, 'F']],
'expected': 'PASS FAIL'
},
'fail.html': {
'results': [[2, 'P'], [1, 'F']],
'expected': 'PASS FAIL'
},
'not_run.html': {
'results': []
},
'crash.html': {
'results': [[2, 'F'], [1, 'C']],
'expected': 'CRASH FAIL SKIP'
},
}
}
}
self._assert_unexpected_results(test_data, {})
def test_unexpected_results_all_unexpected(self):
test_data = {
'tests': {
'foo': {
'pass1.html': {
'results': [[4, 'P']],
'expected': 'FAIL'
},
'pass2.html': {
'results': [[2, 'P']],
'expected': 'FAIL'
},
'fail.html': {
'results': [[4, 'F']]
},
'f_p.html': {
'results': [[1, 'F'], [2, 'P']]
},
'crash.html': {
'results': [[2, 'F'], [1, 'C']],
'expected': 'SKIP'
},
'image.html': {
'results': [[3, 'F']],
'expected': 'CRASH FAIL'
},
'i_f.html': {
'results': [[6, 'F']],
'expected': 'PASS'
},
'all.html':
self._results_from_string('FPFPCNCNTXTXFFFFFCFCYY'),
}
}
}
self._assert_unexpected_results(
test_data, {
'foo/pass1.html': {'FAIL', 'PASS'},
'foo/pass2.html': {'FAIL', 'PASS'},
'foo/fail.html': {'FAIL', 'PASS'},
'foo/f_p.html': {'FAIL', 'PASS'},
'foo/crash.html': {'SKIP', 'CRASH', 'FAIL'},
'foo/i_f.html': {'FAIL', 'PASS'},
'foo/all.html': {'PASS', 'FAIL', 'TIMEOUT', 'CRASH'},
})
|
|
from __future__ import unicode_literals
import tablib
from copy import deepcopy
from datetime import date
from decimal import Decimal
from unittest import skip, skipUnless
from django import VERSION
from django.conf import settings
from django.contrib.auth.models import User
from django.db import IntegrityError, DatabaseError
from django.db.models import Count
from django.db.models.fields import FieldDoesNotExist
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from django.utils.html import strip_tags
from import_export import fields, resources, results, widgets
from import_export.instance_loaders import ModelInstanceLoader
from import_export.resources import Diff
from ..models import (
Author, Book, Category, Entry, Profile, WithDefault, WithDynamicDefault,
WithFloatField,
)
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
class MyResource(resources.Resource):
name = fields.Field()
email = fields.Field()
extra = fields.Field()
class Meta:
export_order = ('email', 'name')
class ResourceTestCase(TestCase):
def setUp(self):
self.my_resource = MyResource()
def test_fields(self):
fields = self.my_resource.fields
self.assertIn('name', fields)
def test_field_column_name(self):
field = self.my_resource.fields['name']
self.assertIn(field.column_name, 'name')
def test_meta(self):
self.assertIsInstance(self.my_resource._meta,
resources.ResourceOptions)
def test_get_export_order(self):
self.assertEqual(self.my_resource.get_export_headers(),
['email', 'name', 'extra'])
# Issue 140 Attributes aren't inherited by subclasses
def test_inheritance(self):
class A(MyResource):
inherited = fields.Field()
class Meta:
import_id_fields = ('email',)
class B(A):
local = fields.Field()
class Meta:
export_order = ('email', 'extra')
resource = B()
self.assertIn('name', resource.fields)
self.assertIn('inherited', resource.fields)
self.assertIn('local', resource.fields)
self.assertEqual(resource.get_export_headers(),
['email', 'extra', 'name', 'inherited', 'local'])
self.assertEqual(resource._meta.import_id_fields, ('email',))
def test_inheritance_with_custom_attributes(self):
class A(MyResource):
inherited = fields.Field()
class Meta:
import_id_fields = ('email',)
custom_attribute = True
class B(A):
local = fields.Field()
resource = B()
self.assertEqual(resource._meta.custom_attribute, True)
class AuthorResource(resources.ModelResource):
books = fields.Field(
column_name='books',
attribute='book_set',
readonly=True,
)
class Meta:
model = Author
export_order = ('name', 'books')
class BookResource(resources.ModelResource):
published = fields.Field(column_name='published_date')
class Meta:
model = Book
exclude = ('imported', )
class CategoryResource(resources.ModelResource):
class Meta:
model = Category
class ProfileResource(resources.ModelResource):
class Meta:
model = Profile
exclude = ('user', )
class WithDefaultResource(resources.ModelResource):
class Meta:
model = WithDefault
fields = ('name',)
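# The tests below exercise BookResource against a small tablib Dataset whose
# columns (id, name, author_email, price) correspond to Book model fields.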
class ModelResourceTest(TestCase):
def setUp(self):
self.resource = BookResource()
self.book = Book.objects.create(name="Some book")
self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email',
'price'])
row = [self.book.pk, 'Some book', 'test@example.com', "10.25"]
self.dataset.append(row)
def test_default_instance_loader_class(self):
self.assertIs(self.resource._meta.instance_loader_class,
ModelInstanceLoader)
def test_fields(self):
fields = self.resource.fields
self.assertIn('id', fields)
self.assertIn('name', fields)
self.assertIn('author_email', fields)
self.assertIn('price', fields)
def test_fields_foreign_key(self):
fields = self.resource.fields
self.assertIn('author', fields)
widget = fields['author'].widget
self.assertIsInstance(widget, widgets.ForeignKeyWidget)
self.assertEqual(widget.model, Author)
def test_fields_m2m(self):
fields = self.resource.fields
self.assertIn('categories', fields)
def test_excluded_fields(self):
self.assertNotIn('imported', self.resource.fields)
def test_init_instance(self):
instance = self.resource.init_instance()
self.assertIsInstance(instance, Book)
def test_default(self):
self.assertEqual(WithDefaultResource.fields['name'].clean({'name': ''}), 'foo_bar')
def test_get_instance(self):
instance_loader = self.resource._meta.instance_loader_class(
self.resource)
self.resource._meta.import_id_fields = ['id']
instance = self.resource.get_instance(instance_loader,
self.dataset.dict[0])
self.assertEqual(instance, self.book)
def test_get_instance_import_id_fields(self):
class BookResource(resources.ModelResource):
name = fields.Field(attribute='name', widget=widgets.CharWidget())
class Meta:
model = Book
import_id_fields = ['name']
resource = BookResource()
instance_loader = resource._meta.instance_loader_class(resource)
instance = resource.get_instance(instance_loader, self.dataset.dict[0])
self.assertEqual(instance, self.book)
def test_get_instance_with_missing_field_data(self):
instance_loader = self.resource._meta.instance_loader_class(
self.resource)
# construct a dataset with a missing "id" column
dataset = tablib.Dataset(headers=['name', 'author_email', 'price'])
dataset.append(['Some book', 'test@example.com', "10.25"])
with self.assertRaises(KeyError) as cm:
self.resource.get_instance(instance_loader, dataset.dict[0])
self.assertEqual(u"Column 'id' not found in dataset. Available columns "
"are: %s" % [u'name', u'author_email', u'price'],
cm.exception.args[0])
def test_get_export_headers(self):
headers = self.resource.get_export_headers()
self.assertEqual(headers, ['published_date', 'id', 'name', 'author',
'author_email', 'published_time', 'price',
'categories', ])
def test_export(self):
dataset = self.resource.export(Book.objects.all())
self.assertEqual(len(dataset), 1)
def test_export_iterable(self):
dataset = self.resource.export(list(Book.objects.all()))
self.assertEqual(len(dataset), 1)
def test_get_diff(self):
diff = Diff(self.resource, self.book, False)
book2 = Book(name="Some other book")
diff.compare_with(self.resource, book2)
html = diff.as_html()
headers = self.resource.get_export_headers()
self.assertEqual(html[headers.index('name')],
u'<span>Some </span><ins style="background:#e6ffe6;">'
u'other </ins><span>book</span>')
self.assertFalse(html[headers.index('author_email')])
@skip("See: https://github.com/django-import-export/django-import-export/issues/311")
def test_get_diff_with_callable_related_manager(self):
resource = AuthorResource()
author = Author(name="Some author")
author.save()
author2 = Author(name="Some author")
self.book.author = author
self.book.save()
diff = Diff(self.resource, author, False)
diff.compare_with(self.resource, author2)
html = diff.as_html()
headers = resource.get_export_headers()
self.assertEqual(html[headers.index('books')],
'<span>core.Book.None</span>')
def test_import_data(self):
result = self.resource.import_data(self.dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), 1)
self.assertTrue(result.rows[0].diff)
self.assertEqual(result.rows[0].import_type,
results.RowResult.IMPORT_TYPE_UPDATE)
instance = Book.objects.get(pk=self.book.pk)
self.assertEqual(instance.author_email, 'test@example.com')
self.assertEqual(instance.price, Decimal("10.25"))
def test_import_data_value_error_includes_field_name(self):
class AuthorResource(resources.ModelResource):
class Meta:
model = Author
resource = AuthorResource()
dataset = tablib.Dataset(headers=['id', 'name', 'birthday'])
dataset.append(['', 'A.A.Milne', '1882test-01-18'])
result = resource.import_data(dataset, raise_errors=False)
self.assertTrue(result.has_errors())
self.assertTrue(result.rows[0].errors)
msg = ("Column 'birthday': Enter a valid date/time.")
actual = result.rows[0].errors[0].error
self.assertIsInstance(actual, ValueError)
self.assertEqual(msg, str(actual))
def test_import_data_error_saving_model(self):
row = list(self.dataset.pop())
# set the pk to something that will trigger a conversion error
row[0] = 'foo'
self.dataset.append(row)
result = self.resource.import_data(self.dataset, raise_errors=False)
self.assertTrue(result.has_errors())
self.assertTrue(result.rows[0].errors)
actual = result.rows[0].errors[0].error
self.assertIsInstance(actual, ValueError)
self.assertIn("Column 'id': could not convert string to float",
str(actual))
def test_import_data_delete(self):
class B(BookResource):
delete = fields.Field(widget=widgets.BooleanWidget())
def for_delete(self, row, instance):
return self.fields['delete'].clean(row)
row = [self.book.pk, self.book.name, '1']
dataset = tablib.Dataset(*[row], headers=['id', 'name', 'delete'])
result = B().import_data(dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(result.rows[0].import_type,
results.RowResult.IMPORT_TYPE_DELETE)
self.assertFalse(Book.objects.filter(pk=self.book.pk))
def test_save_instance_with_dry_run_flag(self):
class B(BookResource):
def before_save_instance(self, instance, using_transactions, dry_run):
super(B, self).before_save_instance(instance, using_transactions, dry_run)
if dry_run:
self.before_save_instance_dry_run = True
else:
self.before_save_instance_dry_run = False
def save_instance(self, instance, using_transactions=True, dry_run=False):
super(B, self).save_instance(instance, using_transactions, dry_run)
if dry_run:
self.save_instance_dry_run = True
else:
self.save_instance_dry_run = False
def after_save_instance(self, instance, using_transactions, dry_run):
super(B, self).after_save_instance(instance, using_transactions, dry_run)
if dry_run:
self.after_save_instance_dry_run = True
else:
self.after_save_instance_dry_run = False
resource = B()
resource.import_data(self.dataset, dry_run=True, raise_errors=True)
self.assertTrue(resource.before_save_instance_dry_run)
self.assertTrue(resource.save_instance_dry_run)
self.assertTrue(resource.after_save_instance_dry_run)
resource.import_data(self.dataset, dry_run=False, raise_errors=True)
self.assertFalse(resource.before_save_instance_dry_run)
self.assertFalse(resource.save_instance_dry_run)
self.assertFalse(resource.after_save_instance_dry_run)
def test_delete_instance_with_dry_run_flag(self):
class B(BookResource):
delete = fields.Field(widget=widgets.BooleanWidget())
def for_delete(self, row, instance):
return self.fields['delete'].clean(row)
def before_delete_instance(self, instance, dry_run):
super(B, self).before_delete_instance(instance, dry_run)
if dry_run:
self.before_delete_instance_dry_run = True
else:
self.before_delete_instance_dry_run = False
def delete_instance(self, instance, using_transactions=True, dry_run=False):
super(B, self).delete_instance(instance, using_transactions, dry_run)
if dry_run:
self.delete_instance_dry_run = True
else:
self.delete_instance_dry_run = False
def after_delete_instance(self, instance, dry_run):
super(B, self).after_delete_instance(instance, dry_run)
if dry_run:
self.after_delete_instance_dry_run = True
else:
self.after_delete_instance_dry_run = False
resource = B()
row = [self.book.pk, self.book.name, '1']
dataset = tablib.Dataset(*[row], headers=['id', 'name', 'delete'])
resource.import_data(dataset, dry_run=True, raise_errors=True)
self.assertTrue(resource.before_delete_instance_dry_run)
self.assertTrue(resource.delete_instance_dry_run)
self.assertTrue(resource.after_delete_instance_dry_run)
resource.import_data(dataset, dry_run=False, raise_errors=True)
self.assertFalse(resource.before_delete_instance_dry_run)
self.assertFalse(resource.delete_instance_dry_run)
self.assertFalse(resource.after_delete_instance_dry_run)
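# The next few tests cover relationship traversal: a field declared as
# 'author__name' follows the foreign key on export, and a dehydrate_<field>
# method on the resource can compute an export value from the instance.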
def test_relationships_fields(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('author__name',)
author = Author.objects.create(name="Author")
self.book.author = author
resource = B()
result = resource.fields['author__name'].export(self.book)
self.assertEqual(result, author.name)
def test_dehydrating_fields(self):
class B(resources.ModelResource):
full_title = fields.Field(column_name="Full title")
class Meta:
model = Book
fields = ('author__name', 'full_title')
def dehydrate_full_title(self, obj):
return '%s by %s' % (obj.name, obj.author.name)
author = Author.objects.create(name="Author")
self.book.author = author
resource = B()
full_title = resource.export_field(resource.get_fields()[0], self.book)
self.assertEqual(full_title, '%s by %s' % (self.book.name,
self.book.author.name))
def test_widget_format_in_fk_field(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('author__birthday',)
widgets = {
'author__birthday': {'format': '%Y-%m-%d'},
}
author = Author.objects.create(name="Author")
self.book.author = author
resource = B()
result = resource.fields['author__birthday'].export(self.book)
self.assertEqual(result, str(date.today()))
def test_widget_kwargs_for_field(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('published',)
widgets = {
'published': {'format': '%d.%m.%Y'},
}
resource = B()
self.book.published = date(2012, 8, 13)
result = resource.fields['published'].export(self.book)
self.assertEqual(result, "13.08.2012")
def test_foreign_keys_export(self):
author1 = Author.objects.create(name='Foo')
self.book.author = author1
self.book.save()
dataset = self.resource.export(Book.objects.all())
self.assertEqual(dataset.dict[0]['author'], author1.pk)
def test_foreign_keys_import(self):
author2 = Author.objects.create(name='Bar')
headers = ['id', 'name', 'author']
row = [None, 'FooBook', author2.pk]
dataset = tablib.Dataset(row, headers=headers)
self.resource.import_data(dataset, raise_errors=True)
book = Book.objects.get(name='FooBook')
self.assertEqual(book.author, author2)
def test_m2m_export(self):
cat1 = Category.objects.create(name='Cat 1')
cat2 = Category.objects.create(name='Cat 2')
self.book.categories.add(cat1)
self.book.categories.add(cat2)
dataset = self.resource.export(Book.objects.all())
self.assertEqual(dataset.dict[0]['categories'],
'%d,%d' % (cat1.pk, cat2.pk))
def test_m2m_import(self):
cat1 = Category.objects.create(name='Cat 1')
headers = ['id', 'name', 'categories']
row = [None, 'FooBook', "%s" % cat1.pk]
dataset = tablib.Dataset(row, headers=headers)
self.resource.import_data(dataset, raise_errors=True)
book = Book.objects.get(name='FooBook')
self.assertIn(cat1, book.categories.all())
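# By default the m2m column round-trips as comma-separated primary keys; the
# next test swaps in a ManyToManyWidget keyed on the 'name' field with a '|'
# separator instead.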
def test_m2m_options_import(self):
cat1 = Category.objects.create(name='Cat 1')
cat2 = Category.objects.create(name='Cat 2')
headers = ['id', 'name', 'categories']
row = [None, 'FooBook', "Cat 1|Cat 2"]
dataset = tablib.Dataset(row, headers=headers)
class BookM2MResource(resources.ModelResource):
categories = fields.Field(
attribute='categories',
widget=widgets.ManyToManyWidget(Category, field='name',
separator='|')
)
class Meta:
model = Book
resource = BookM2MResource()
resource.import_data(dataset, raise_errors=True)
book = Book.objects.get(name='FooBook')
self.assertIn(cat1, book.categories.all())
self.assertIn(cat2, book.categories.all())
def test_related_one_to_one(self):
# issue #17 - Exception when attempting to access something via the
# related_name
user = User.objects.create(username='foo')
profile = Profile.objects.create(user=user)
Entry.objects.create(user=user)
Entry.objects.create(user=User.objects.create(username='bar'))
class EntryResource(resources.ModelResource):
class Meta:
model = Entry
fields = ('user__profile', 'user__profile__is_private')
resource = EntryResource()
dataset = resource.export(Entry.objects.all())
self.assertEqual(dataset.dict[0]['user__profile'], profile.pk)
self.assertEqual(dataset.dict[0]['user__profile__is_private'], '1')
self.assertEqual(dataset.dict[1]['user__profile'], '')
self.assertEqual(dataset.dict[1]['user__profile__is_private'], '')
def test_empty_get_queryset(self):
# issue #25 - Overriding queryset on export() fails when passed
# queryset has zero elements
dataset = self.resource.export(Book.objects.none())
self.assertEqual(len(dataset), 0)
def test_import_data_skip_unchanged(self):
def attempted_save(instance, real_dry_run):
self.fail('Resource attempted to save instead of skipping')
# Make sure we test with ManyToMany related objects
cat1 = Category.objects.create(name='Cat 1')
cat2 = Category.objects.create(name='Cat 2')
self.book.categories.add(cat1)
self.book.categories.add(cat2)
dataset = self.resource.export()
# Create a new resource that attempts to reimport the data currently
# in the database while skipping unchanged rows (i.e. all of them)
resource = deepcopy(self.resource)
resource._meta.skip_unchanged = True
# Fail the test if the resource attempts to save the row
resource.save_instance = attempted_save
result = resource.import_data(dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), len(dataset))
self.assertTrue(result.rows[0].diff)
self.assertEqual(result.rows[0].import_type,
results.RowResult.IMPORT_TYPE_SKIP)
# Test that we can suppress reporting of skipped rows
resource._meta.report_skipped = False
result = resource.import_data(dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), 0)
def test_before_import_access_to_kwargs(self):
class B(BookResource):
def before_import(self, dataset, using_transactions, dry_run, **kwargs):
if 'extra_arg' in kwargs:
dataset.headers[dataset.headers.index('author_email')] = 'old_email'
dataset.insert_col(0,
lambda row: kwargs['extra_arg'],
header='author_email')
resource = B()
result = resource.import_data(self.dataset, raise_errors=True,
extra_arg='extra@example.com')
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), 1)
instance = Book.objects.get(pk=self.book.pk)
self.assertEqual(instance.author_email, 'extra@example.com')
def test_link_to_nonexistent_field(self):
with self.assertRaises(FieldDoesNotExist) as cm:
class BrokenBook1(resources.ModelResource):
class Meta:
model = Book
fields = ('nonexistent__invalid',)
self.assertEqual("Book.nonexistent: Book has no field named 'nonexistent'",
cm.exception.args[0])
with self.assertRaises(FieldDoesNotExist) as cm:
class BrokenBook2(resources.ModelResource):
class Meta:
model = Book
fields = ('author__nonexistent',)
self.assertEqual("Book.author.nonexistent: Author has no field named "
"'nonexistent'", cm.exception.args[0])
def test_link_to_nonrelation_field(self):
with self.assertRaises(KeyError) as cm:
class BrokenBook1(resources.ModelResource):
class Meta:
model = Book
fields = ('published__invalid',)
self.assertEqual("Book.published is not a relation",
cm.exception.args[0])
with self.assertRaises(KeyError) as cm:
class BrokenBook2(resources.ModelResource):
class Meta:
model = Book
fields = ('author__name__invalid',)
self.assertEqual("Book.author.name is not a relation",
cm.exception.args[0])
def test_override_field_construction_in_resource(self):
class B(resources.ModelResource):
class Meta:
model = Book
fields = ('published',)
@classmethod
def field_from_django_field(self, field_name, django_field,
readonly):
if field_name == 'published':
return {'sound': 'quack'}
B()
self.assertEqual({'sound': 'quack'}, B.fields['published'])
def test_readonly_annotated_field_import_and_export(self):
class B(BookResource):
total_categories = fields.Field('total_categories', readonly=True)
class Meta:
model = Book
skip_unchanged = True
cat1 = Category.objects.create(name='Cat 1')
self.book.categories.add(cat1)
resource = B()
# Verify that the annotated field is correctly exported
dataset = resource.export(
Book.objects.annotate(total_categories=Count('categories')))
self.assertEqual(int(dataset.dict[0]['total_categories']), 1)
# Verify that importing the annotated field raises no errors and that
# the rows are skipped
result = resource.import_data(dataset, raise_errors=True)
self.assertFalse(result.has_errors())
self.assertEqual(len(result.rows), len(dataset))
self.assertEqual(
result.rows[0].import_type, results.RowResult.IMPORT_TYPE_SKIP)
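# Note: the annotated 'total_categories' field is readonly, so it is exported
# but ignored on import, which is why re-importing the export above produces
# skipped rows rather than errors.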
def test_follow_relationship_for_modelresource(self):
class EntryResource(resources.ModelResource):
username = fields.Field(attribute='user__username', readonly=False)
class Meta:
model = Entry
fields = ('id', )
def after_save_instance(self, instance, using_transactions, dry_run):
if not using_transactions and dry_run:
# we don't have transactions and we want to do a dry_run
pass
else:
instance.user.save()
user = User.objects.create(username='foo')
entry = Entry.objects.create(user=user)
row = [
entry.pk,
'bar',
]
self.dataset = tablib.Dataset(headers=['id', 'username'])
self.dataset.append(row)
result = EntryResource().import_data(
self.dataset, raise_errors=True, dry_run=False)
self.assertFalse(result.has_errors())
self.assertEqual(User.objects.get(pk=user.pk).username, 'bar')
def test_import_data_dynamic_default_callable(self):
class DynamicDefaultResource(resources.ModelResource):
class Meta:
model = WithDynamicDefault
fields = ('id', 'name',)
self.assertTrue(callable(DynamicDefaultResource.fields['name'].default))
resource = DynamicDefaultResource()
dataset = tablib.Dataset(headers=['id', 'name', ])
dataset.append([1, None])
dataset.append([2, None])
resource.import_data(dataset, raise_errors=False)
objs = WithDynamicDefault.objects.all()
self.assertNotEqual(objs[0].name, objs[1].name)
def test_float_field(self):
# Issue #433: empty values imported into a FloatField should become None
class R(resources.ModelResource):
class Meta:
model = WithFloatField
resource = R()
dataset = tablib.Dataset(headers=['id', 'f', ])
dataset.append([None, None])
dataset.append([None, ''])
resource.import_data(dataset, raise_errors=True)
self.assertEqual(WithFloatField.objects.all()[0].f, None)
self.assertEqual(WithFloatField.objects.all()[1].f, None)
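# The transaction tests below need real database transactions, hence
# TransactionTestCase plus the skipUnlessDBFeature('supports_transactions')
# guard on each test.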
class ModelResourceTransactionTest(TransactionTestCase):
@skipUnlessDBFeature('supports_transactions')
def test_m2m_import_with_transactions(self):
resource = BookResource()
cat1 = Category.objects.create(name='Cat 1')
headers = ['id', 'name', 'categories']
row = [None, 'FooBook', "%s" % cat1.pk]
dataset = tablib.Dataset(row, headers=headers)
result = resource.import_data(
dataset, dry_run=True, use_transactions=True
)
row_diff = result.rows[0].diff
fields = resource.get_fields()
id_field = resource.fields['id']
id_diff = row_diff[fields.index(id_field)]
# the id diff should exist because FooBook was saved inside the
# (rolled-back) transaction
self.assertTrue(id_diff)
category_field = resource.fields['categories']
categories_diff = row_diff[fields.index(category_field)]
self.assertEqual(strip_tags(categories_diff), force_text(cat1.pk))
# check that the transaction was really rolled back
self.assertFalse(Book.objects.filter(name='FooBook'))
@skipUnlessDBFeature('supports_transactions')
def test_m2m_import_with_transactions_error(self):
resource = ProfileResource()
headers = ['id', 'user']
# 'user' is a required field, so the database will raise an error.
row = [None, None]
dataset = tablib.Dataset(row, headers=headers)
result = resource.import_data(
dataset, dry_run=True, use_transactions=True
)
# Ensure the error raised by the database has been saved.
self.assertTrue(result.has_errors())
# Ensure the rollback has worked properly.
self.assertEqual(Profile.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_integrity_error_rollback_on_savem2m(self):
# savepoint_rollback() after an IntegrityError gives
# TransactionManagementError (#399)
class CategoryResourceRaisesIntegrityError(CategoryResource):
def save_m2m(self, instance, *args, **kwargs):
# force raising IntegrityError
Category.objects.create(name=instance.name)
resource = CategoryResourceRaisesIntegrityError()
headers = ['id', 'name']
rows = [
[None, 'foo'],
]
dataset = tablib.Dataset(*rows, headers=headers)
result = resource.import_data(
dataset,
use_transactions=True,
)
self.assertTrue(result.has_errors())
@skipUnlessDBFeature('supports_transactions')
def test_multiple_database_errors(self):
class CategoryResourceDbErrorsResource(CategoryResource):
def before_import(self, *args, **kwargs):
raise DatabaseError()
def save_instance(self):
raise DatabaseError()
resource = CategoryResourceDbErrorsResource()
headers = ['id', 'name']
rows = [
[None, 'foo'],
]
dataset = tablib.Dataset(*rows, headers=headers)
result = resource.import_data(
dataset,
use_transactions=True,
)
self.assertTrue(result.has_errors())
class ModelResourceFactoryTest(TestCase):
def test_create(self):
BookResource = resources.modelresource_factory(Book)
self.assertIn('id', BookResource.fields)
self.assertEqual(BookResource._meta.model, Book)
@skipUnless(
'postgresql' in settings.DATABASES['default']['ENGINE'],
'Run only against Postgres')
class PostgresTests(TransactionTestCase):
# Make sure to start the sequences back at 1
reset_sequences = True
def test_create_object_after_importing_dataset_with_id(self):
dataset = tablib.Dataset(headers=['id', 'name'])
dataset.append([1, 'Some book'])
resource = BookResource()
result = resource.import_data(dataset)
self.assertFalse(result.has_errors())
try:
Book.objects.create(name='Some other book')
except IntegrityError:
self.fail('IntegrityError was raised.')
def test_collect_failed_rows(self):
resource = ProfileResource()
headers = ['id', 'user']
# 'user' is a required field, so the database will raise an error.
row = [None, None]
dataset = tablib.Dataset(row, headers=headers)
result = resource.import_data(
dataset, dry_run=True, use_transactions=True,
collect_failed_rows=True,
)
self.assertEqual(
result.failed_dataset.headers,
[u'id', u'user', u'Error']
)
self.assertEqual(len(result.failed_dataset), 1)
# We can't check the error message because it's package- and version-dependent
if VERSION >= (1, 8) and 'postgresql' in settings.DATABASES['default']['ENGINE']:
from django.contrib.postgres.fields import ArrayField
from django.db import models
class BookWithChapters(models.Model):
name = models.CharField('Book name', max_length=100)
chapters = ArrayField(models.CharField(max_length=100), default=list)
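# The ArrayField test feeds the chapters as a single comma-joined cell and
# expects the default widget to split it back into a Python list on import.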
class ArrayFieldTest(TestCase):
fixtures = []
def setUp(self):
pass
def test_arrayfield(self):
dataset_headers = ["id", "name", "chapters"]
chapters = ["Introduction", "Middle Chapter", "Ending"]
dataset_row = ["1", "Book With Chapters", ",".join(chapters)]
dataset = tablib.Dataset(headers=dataset_headers)
dataset.append(dataset_row)
book_with_chapters_resource = resources.modelresource_factory(model=BookWithChapters)()
result = book_with_chapters_resource.import_data(dataset, dry_run=False)
self.assertFalse(result.has_errors())
book_with_chapters = list(BookWithChapters.objects.all())[0]
self.assertListEqual(book_with_chapters.chapters, chapters)
class ManyRelatedManagerDiffTest(TestCase):
fixtures = ["category", "book"]
def setUp(self):
pass
def test_related_manager_diff(self):
dataset_headers = ["id", "name", "categories"]
dataset_row = ["1", "Test Book", "1"]
original_dataset = tablib.Dataset(headers=dataset_headers)
original_dataset.append(dataset_row)
dataset_row[2] = "2"
changed_dataset = tablib.Dataset(headers=dataset_headers)
changed_dataset.append(dataset_row)
book_resource = BookResource()
export_headers = book_resource.get_export_headers()
add_result = book_resource.import_data(original_dataset, dry_run=False)
expected_value = u'<ins style="background:#e6ffe6;">1</ins>'
self.check_value(add_result, export_headers, expected_value)
change_result = book_resource.import_data(changed_dataset, dry_run=False)
expected_value = u'<del style="background:#ffe6e6;">1</del><ins style="background:#e6ffe6;">2</ins>'
self.check_value(change_result, export_headers, expected_value)
def check_value(self, result, export_headers, expected_value):
self.assertEqual(len(result.rows), 1)
diff = result.rows[0].diff
self.assertEqual(diff[export_headers.index("categories")],
expected_value)
|
|
#!/usr/bin/env python3
import os
import shutil
import json
import yaml
from PIL import Image
from nxtools import *
class GremaProduct():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
@property
def data_dir(self):
return self.parent.data_dir
@property
def site_dir(self):
return self.parent.site_dir
@property
def data_path(self):
return os.path.join(self.data_dir, self.parent.parent.slug, self.parent.slug, self.slug + ".txt")
@property
def image_path(self):
return os.path.join(self.data_dir, self.parent.parent.slug, self.parent.slug, self.slug + ".jpg")
@property
def has_image(self):
return os.path.exists(self.image_path)
@property
def meta(self):
group_slug = self.parent.slug
cat_slug = self.parent.parent.slug
return {
"slug" : self.slug,
"title" : self.title,
"group_slug" : group_slug,
"group_title" : self.parent.title,
"cat_slug" : cat_slug,
"cat_title" : self.parent.parent.title,
"has_image" : self.has_image,
"image" : os.path.join("/products", cat_slug, group_slug, "{}.jpg".format(self.slug)) if self.has_image else "false"
}
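# build() parses the product's .txt data file. Based on the parsing below, a
# data file is expected to look roughly like this (illustrative example only):
#
#   First paragraph, used as the meta description.
#
#   Remaining text becomes the page body.
#   Small variant : 100
#   Large variant : 150
#
# Lines of the form "<variant> : <integer price>" are collected into the
# pricelist; all other lines form the page body, and those before the first
# blank line also form the description.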
def build(self, root_dir):
#output_dir = os.path.join(self.site_dir, "products", self.meta["cat_slug"], self.meta["group_slug"])
if not os.path.exists(self.data_path):
logging.warning("{} data file does not exist".format(self.data_path))
return
# read description and pricelist
description = ""
description_done = False
product_text = ""
pricelist = []
for pline in open(self.data_path).readlines():
r = pline.split(":")
if len(r) == 2 and r[1].strip().isdigit():
pricelist.append(r)
continue
if pline.strip() == "":
description_done = True
if not description_done:
description += pline
product_text += pline
description = description.replace("\n", " ").strip()
# write file
with open(os.path.join(root_dir, self.meta["slug"] + ".md"), "w") as f:
f.write("---\nlayout: product\n")
for key in self.meta:
f.write("{} : {}\n".format(key, self.meta[key]))
f.write("description : {}\n".format(description))
if pricelist:
f.write("pricing:\n")
for v, c in pricelist:
f.write(" - variant : {}\n".format(v.strip()))
f.write(" price : {}\n".format(c.strip()))
f.write("---\n")
f.write("\n{}\n\n".format(product_text.strip()))
# create images
if self.has_image:
original_image = Image.open(self.image_path)
image_full_path = os.path.join(root_dir, "{}.jpg".format(self.slug))
image_thumb_path = os.path.join(root_dir, "{}_tn.jpg".format(self.slug))
if os.path.exists(image_full_path):
image_full = original_image.resize((800, 500), Image.ANTIALIAS)
image_full.save(image_full_path)
if not os.path.exists(image_thumb_path):
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
class GremaProductGroup():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
self.products = []
def get_product(self, query):
for product in self.products:
if product.title == query or product.slug == query:
return product
@property
def description(self):
return "TODO: group description"
@property
def data_dir(self):
return self.parent.data_dir
@property
def group_dir(self):
return os.path.join(self.data_dir, self.parent.slug, self.slug)
@property
def site_dir(self):
return self.parent.site_dir
@property
def meta(self):
return {
"title" : self.title,
"slug" : self.slug,
"group_slug" : self.slug, # kvuli zvyraznovani v sidebaru
"cat_slug" : self.parent.slug,
"has_index" : os.path.exists(os.path.join(self.group_dir, "index.txt")),
"has_image" : os.path.exists(os.path.join(self.group_dir, "index.jpg"))
}
@property
def map(self):
result = {key : self.meta[key] for key in self.meta}
result["products"] = [product.meta for product in self.products]
return result
def build(self, root_dir):
group_dir = os.path.join(root_dir, self.slug)
if not os.path.exists(group_dir):
os.makedirs(group_dir)
# Create group index page
with open(os.path.join(group_dir, "index.md"), "w") as f:
f.write("---\nlayout: product_group\n")
for key in self.meta:
f.write("{} : {}\n".format(key, self.meta[key]))
f.write("products:\n")
for product in self.products:
f.write(" - slug: {}\n".format(product.slug))
f.write(" title: {}\n".format(product.title))
f.write(" has_image: {}\n".format(product.has_image))
f.write("---\n\n")
index_path = os.path.join(self.data_dir, self.parent.slug, self.slug, "index.txt")
if os.path.exists(index_path):
f.write(open(index_path).read())
# Convert index image
index_image_path = os.path.join(self.data_dir, self.parent.slug, self.slug, "index.jpg")
if os.path.exists(index_image_path):
original_image = Image.open(index_image_path)
image_full_path = os.path.join(group_dir, "index.jpg")
image_thumb_path = os.path.join(group_dir, "index_tn.jpg")
image_full = original_image.resize((800, 500), Image.ANTIALIAS)
image_full.save(image_full_path)
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
# Build products
for product in self.products:
product.build(group_dir)
class GremaCategory():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
self.load_groups()
def get_product(self, query):
for group in self.groups:
product = group.get_product(query)
if product:
return product
@property
def data_dir(self):
return self.parent.data_dir
@property
def site_dir(self):
return self.parent.site_dir
@property
def map(self):
return {
"title" : self.title,
"slug" : self.slug,
"groups" : [group.map for group in self.groups if (group.products or group.meta["has_index"])]
}
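# load_groups() reads index-<category-slug>.yml from the data directory. The
# file is expected to map group titles to lists of product titles, e.g.
# (illustrative only):
#
#   Garden tools:
#     - Shovel
#     - Rake
#   Accessories:
#
# A group with no listed products (empty value) is still created.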
def load_groups(self):
self.groups = []
index_path = os.path.join(self.data_dir, "index-{}.yml".format(self.slug))
if not os.path.exists(index_path):
logging.error("{} does not exist".format(index_path))
return
data = yaml.safe_load(open(index_path))
if not data:
logging.error("No data in {}".format(index_path))
return
for group_title in data.keys():
logging.debug("Creating category {}".format(group_title))
group = GremaProductGroup(self, group_title)
if data[group_title]:
for product_title in data[group_title]:
product = GremaProduct(group, product_title)
group.products.append(product)
self.groups.append(group)
def build(self, root_dir):
category_dir = os.path.join(root_dir, self.slug)
if not os.path.exists(category_dir):
os.makedirs(category_dir)
for group in self.groups:
group.build(category_dir)
class GremaSite():
def __init__(self):
self.data_dir = "_source"
self.site_dir = "."
self.load_categories()
def get_product(self, query):
for category in self.categories:
product = category.get_product(query)
if product:
return product
def load_categories(self):
self.categories = []
index_path = os.path.join(self.data_dir, "index.yml")
if not os.path.exists(index_path):
return
for category_title in yaml.safe_load(open(index_path))["categories"]:
category_title = to_unicode(category_title)
self.categories.append(GremaCategory(self, category_title))
def build(self):
product_map = []
root_dir = os.path.join(self.site_dir, "products")
for category in self.categories:
logging.info("Creating category {}".format(category.title))
category.build(root_dir)
cmap = category.map
if cmap["groups"]:
product_map.append(cmap)
product_map_path = os.path.join(self.site_dir, "_data", "products.yml")
with open(product_map_path, 'w') as outfile:
outfile.write(
yaml.dump(product_map)
)
with open("data.json","w") as f:
json.dump(product_map, f)
# Default thumbnail
original_image = Image.open(os.path.join(self.data_dir, "default.png"))
image_full_path = os.path.join(self.site_dir, "static", "default.jpg")
image_thumb_path = os.path.join(self.site_dir, "static", "default_tn.jpg")
image_full = original_image.resize((640, 400), Image.ANTIALIAS)
image_full.save(image_full_path)
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
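# import_products() copies loose "<slug>.txt" (and optional "<slug>.jpg") files
# from a flat source directory into the structured _source tree, matching each
# file to an existing product by its base name.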
def import_products(site, data_dir):
for fname in os.listdir(data_dir):
if os.path.splitext(fname)[1] != ".txt":
continue
product_source_path = os.path.join(data_dir, fname)
base_name = get_base_name(fname)
image_source_path = os.path.join(data_dir, base_name + ".jpg")
product = site.get_product(base_name)
if not product:
continue
product_dir = os.path.dirname(product.data_path)
if not os.path.exists(product_dir):
os.makedirs(product_dir)
shutil.copy2(product_source_path, product.data_path)
if os.path.exists(image_source_path):
shutil.copy2(image_source_path, product.image_path)
if __name__ == "__main__":
grema = GremaSite()
grema.build()
|
|
# Copyright (c) 2014 NetApp, Inc.
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Generic driver module."""
import os
import time
import ddt
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from six import moves
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
import manila.share.configuration
from manila.share.drivers import generic
from manila.share import share_types
from manila import test
from manila.tests import fake_compute
from manila.tests import fake_service_instance
from manila.tests import fake_share
from manila.tests import fake_utils
from manila.tests import fake_volume
from manila import utils
from manila import volume
CONF = cfg.CONF
def get_fake_manage_share():
return {
'id': 'fake',
'share_proto': 'NFS',
'share_type_id': 'fake',
'export_locations': [
{'path': '10.0.0.1:/foo/fake/path'},
{'path': '11.0.0.1:/bar/fake/path'},
],
}
def get_fake_snap_dict():
snap_dict = {
'status': 'available',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': '0',
'created_at': '2015-08-10 00:05:58',
'updated_at': '2015-08-10 00:05:58',
'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'cgsnapshot_members': [
{
'status': 'available',
'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f',
'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f',
'user_id': 'a0314a441ca842019b0952224aa39192',
'deleted': 'False',
'created_at': '2015-08-10 00:05:58',
'share': {
'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8',
'deleted': False,
},
'updated_at': '2015-08-10 00:05:58',
'share_proto': 'NFS',
'project_id': '13c0be6290934bd98596cfa004650049',
'cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'deleted_at': None,
'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8',
'size': 1,
},
],
'deleted_at': None,
'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'name': None,
}
return snap_dict
def get_fake_cg_dict():
cg_dict = {
'status': 'creating',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': 'False',
'created_at': '2015-08-10 00:07:58',
'updated_at': None,
'source_cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'host': 'openstack2@cmodeSSVMNFS',
'deleted_at': None,
'shares': [
{
'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8',
'deleted': False,
'source_cgsnapshot_member_id':
'03e2f06e-14f2-45a5-9631-0949d1937bd8',
},
],
'share_types': [
{
'id': 'f6aa3b56-45a5-9631-02a32f06e1937b',
'deleted': False,
'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f',
},
],
'id': 'eda52174-0442-476d-9694-a58327466c14',
'name': None
}
return cg_dict
def get_fake_collated_cg_snap_info():
fake_collated_cg_snap_info = [
{
'share': {
'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8',
'deleted': False,
'source_cgsnapshot_member_id':
'03e2f06e-14f2-45a5-9631-0949d1937bd8',
},
'snapshot': {
'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8',
},
},
]
return fake_collated_cg_snap_info
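# GenericShareDriverTestCase stubs out the compute and volume APIs, the service
# instance manager and SSH execution, so the driver logic can be exercised
# without any running OpenStack services.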
@ddt.ddt
class GenericShareDriverTestCase(test.TestCase):
"""Tests GenericShareDriver."""
def setUp(self):
super(GenericShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
self._execute = mock.Mock(return_value=('', ''))
self._helper_cifs = mock.Mock()
self._helper_nfs = mock.Mock()
CONF.set_default('driver_handles_share_servers', True)
self.fake_conf = manila.share.configuration.Configuration(None)
self.fake_private_storage = mock.Mock()
self.mock_object(self.fake_private_storage, 'get',
mock.Mock(return_value=None))
with mock.patch.object(
generic.service_instance,
'ServiceInstanceManager',
fake_service_instance.FakeServiceInstanceManager):
self._driver = generic.GenericShareDriver(
private_storage=self.fake_private_storage,
execute=self._execute, configuration=self.fake_conf)
self._driver.service_tenant_id = 'service tenant id'
self._driver.service_network_id = 'service network id'
self._driver.compute_api = fake_compute.API()
self._driver.volume_api = fake_volume.API()
self._driver.share_networks_locks = {}
self._driver.get_service_instance = mock.Mock()
self._driver.share_networks_servers = {}
self._driver.admin_context = self._context
self.fake_sn = {"id": "fake_sn_id"}
self.fake_net_info = {
"id": "fake_srv_id",
"share_network_id": "fake_sn_id"
}
fsim = fake_service_instance.FakeServiceInstanceManager()
sim = mock.Mock(return_value=fsim)
self._driver.instance_manager = sim
self._driver.service_instance_manager = sim
self.fake_server = sim._create_service_instance(
context="fake", instance_name="fake",
share_network_id=self.fake_sn["id"], old_server_ip="fake")
self.mock_object(utils, 'synchronized',
mock.Mock(return_value=lambda f: f))
self.mock_object(generic.os.path, 'exists',
mock.Mock(return_value=True))
self._driver._helpers = {
'CIFS': self._helper_cifs,
'NFS': self._helper_nfs,
}
self.share = fake_share.fake_share(share_proto='NFS')
self.server = {
'instance_id': 'fake_instance_id',
'ip': 'fake_ip',
'username': 'fake_username',
'password': 'fake_password',
'pk_path': 'fake_pk_path',
'backend_details': {
'ip': '1.2.3.4',
'instance_id': 'fake',
'service_ip': 'fake_ip',
},
'availability_zone': 'fake_az',
}
self.access = fake_share.fake_access()
self.snapshot = fake_share.fake_snapshot()
self.mock_object(time, 'sleep')
self.mock_debug_log = self.mock_object(generic.LOG, 'debug')
self.mock_warning_log = self.mock_object(generic.LOG, 'warning')
self.mock_error_log = self.mock_object(generic.LOG, 'error')
self.mock_exception_log = self.mock_object(generic.LOG, 'exception')
def test_do_setup(self):
self.mock_object(volume, 'API')
self.mock_object(compute, 'API')
self.mock_object(self._driver, '_setup_helpers')
self._driver.do_setup(self._context)
volume.API.assert_called_once_with()
compute.API.assert_called_once_with()
self._driver._setup_helpers.assert_called_once_with()
def test_setup_helpers(self):
self._driver._helpers = {}
CONF.set_default('share_helpers', ['NFS=fakenfs'])
self.mock_object(generic.importutils, 'import_class',
mock.Mock(return_value=self._helper_nfs))
self._driver._setup_helpers()
generic.importutils.import_class.assert_has_calls([
mock.call('fakenfs')
])
self._helper_nfs.assert_called_once_with(
self._execute,
self._driver._ssh_exec,
self.fake_conf
)
self.assertEqual(len(self._driver._helpers), 1)
def test_setup_helpers_no_helpers(self):
self._driver._helpers = {}
CONF.set_default('share_helpers', [])
self.assertRaises(exception.ManilaException,
self._driver._setup_helpers)
def test__get_access_rule_for_data_copy_dhss_true(self):
get_access_return = {
'access_level': 'rw',
'access_to': 'fake_ip',
'access_type': 'ip'
}
result = self._driver._get_access_rule_for_data_copy(
self._context, self.share, self.server)
self.assertEqual(get_access_return, result)
def test__get_access_rule_for_data_copy_dhss_false(self):
get_access_return = {
'access_level': 'rw',
'access_to': 'fake_ip',
'access_type': 'ip'
}
CONF.set_default('driver_handles_share_servers', False)
CONF.set_default('migration_data_copy_node_ip', 'fake_ip')
result = self._driver._get_access_rule_for_data_copy(
self._context, self.share, self.server)
self.assertEqual(get_access_return, result)
def test_create_share(self):
volume = 'fake_volume'
volume2 = 'fake_volume2'
self._helper_nfs.create_export.return_value = 'fakelocation'
self.mock_object(self._driver, '_allocate_container',
mock.Mock(return_value=volume))
self.mock_object(self._driver, '_attach_volume',
mock.Mock(return_value=volume2))
self.mock_object(self._driver, '_format_device')
self.mock_object(self._driver, '_mount_device')
result = self._driver.create_share(
self._context, self.share, share_server=self.server)
self.assertEqual(result, 'fakelocation')
self._driver._allocate_container.assert_called_once_with(
self._driver.admin_context, self.share)
self._driver._attach_volume.assert_called_once_with(
self._driver.admin_context, self.share,
self.server['backend_details']['instance_id'],
volume)
self._driver._format_device.assert_called_once_with(
self.server['backend_details'], volume2)
self._driver._mount_device.assert_called_once_with(
self.share, self.server['backend_details'], volume2)
def test_create_share_exception(self):
share = fake_share.fake_share(share_network_id=None)
self.assertRaises(exception.ManilaException, self._driver.create_share,
self._context, share)
def test_create_share_invalid_helper(self):
self._driver._helpers = {'CIFS': self._helper_cifs}
self.assertRaises(exception.InvalidShare, self._driver.create_share,
self._context, self.share, share_server=self.server)
def test_format_device(self):
volume = {'mountpoint': 'fake_mount_point'}
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=('', '')))
self._driver._format_device(self.server, volume)
self._driver._ssh_exec.assert_called_once_with(
self.server,
['sudo', 'mkfs.%s' % self.fake_conf.share_volume_fstype,
volume['mountpoint']])
def test_mount_device_not_present(self):
server = {'instance_id': 'fake_server_id'}
mount_path = self._driver._get_mount_path(self.share)
volume = {'mountpoint': 'fake_mount_point'}
self.mock_object(self._driver, '_is_device_mounted',
mock.Mock(return_value=False))
self.mock_object(self._driver, '_sync_mount_temp_and_perm_files')
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=('', '')))
self._driver._mount_device(self.share, server, volume)
self._driver._is_device_mounted.assert_called_once_with(
mount_path, server, volume)
self._driver._sync_mount_temp_and_perm_files.assert_called_once_with(
server)
self._driver._ssh_exec.assert_called_once_with(
server,
['sudo mkdir -p', mount_path,
'&&', 'sudo mount', volume['mountpoint'], mount_path,
'&& sudo chmod 777', mount_path],
)
def test_mount_device_present(self):
mount_path = '/fake/mount/path'
volume = {'mountpoint': 'fake_mount_point'}
self.mock_object(self._driver, '_is_device_mounted',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_get_mount_path',
mock.Mock(return_value=mount_path))
self.mock_object(generic.LOG, 'warning')
self._driver._mount_device(self.share, self.server, volume)
self._driver._get_mount_path.assert_called_once_with(self.share)
self._driver._is_device_mounted.assert_called_once_with(
mount_path, self.server, volume)
generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY)
def test_mount_device_exception_raised(self):
volume = {'mountpoint': 'fake_mount_point'}
self.mock_object(
self._driver, '_is_device_mounted',
mock.Mock(side_effect=exception.ProcessExecutionError))
self.assertRaises(
exception.ShareBackendException,
self._driver._mount_device,
self.share,
self.server,
volume,
)
self._driver._is_device_mounted.assert_called_once_with(
self._driver._get_mount_path(self.share), self.server, volume)
def test_unmount_device_present(self):
mount_path = '/fake/mount/path'
self.mock_object(self._driver, '_is_device_mounted',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_sync_mount_temp_and_perm_files')
self.mock_object(self._driver, '_get_mount_path',
mock.Mock(return_value=mount_path))
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=('', '')))
self._driver._unmount_device(self.share, self.server)
self._driver._get_mount_path.assert_called_once_with(self.share)
self._driver._is_device_mounted.assert_called_once_with(
mount_path, self.server)
self._driver._sync_mount_temp_and_perm_files.assert_called_once_with(
self.server)
self._driver._ssh_exec.assert_called_once_with(
self.server,
['sudo umount', mount_path, '&& sudo rmdir', mount_path],
)
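# The next test checks the driver's retry behaviour: the first umount attempt
# raises ProcessExecutionError, the driver sleeps once, and the second attempt
# succeeds, so _ssh_exec ends up being called twice.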
def test_unmount_device_retry_once(self):
self.counter = 0
def _side_effect(*args):
self.counter += 1
if self.counter < 2:
raise exception.ProcessExecutionError
mount_path = '/fake/mount/path'
self.mock_object(self._driver, '_is_device_mounted',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_sync_mount_temp_and_perm_files')
self.mock_object(self._driver, '_get_mount_path',
mock.Mock(return_value=mount_path))
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(side_effect=_side_effect))
self._driver._unmount_device(self.share, self.server)
self.assertEqual(1, time.sleep.call_count)
self.assertEqual(self._driver._get_mount_path.mock_calls,
[mock.call(self.share) for i in moves.range(2)])
self.assertEqual(self._driver._is_device_mounted.mock_calls,
[mock.call(mount_path,
self.server) for i in moves.range(2)])
self._driver._sync_mount_temp_and_perm_files.assert_called_once_with(
self.server)
self.assertEqual(
self._driver._ssh_exec.mock_calls,
[mock.call(self.server, ['sudo umount', mount_path,
'&& sudo rmdir', mount_path])
for i in moves.range(2)]
)
def test_unmount_device_not_present(self):
mount_path = '/fake/mount/path'
self.mock_object(self._driver, '_is_device_mounted',
mock.Mock(return_value=False))
self.mock_object(self._driver, '_get_mount_path',
mock.Mock(return_value=mount_path))
self.mock_object(generic.LOG, 'warning')
self._driver._unmount_device(self.share, self.server)
self._driver._get_mount_path.assert_called_once_with(self.share)
self._driver._is_device_mounted.assert_called_once_with(
mount_path, self.server)
generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY)
def test_is_device_mounted_true(self):
volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'}
mount_path = '/fake/mount/path'
mounts = "%(dev)s on %(path)s" % {'dev': volume['mountpoint'],
'path': mount_path}
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=(mounts, '')))
result = self._driver._is_device_mounted(
mount_path, self.server, volume)
self._driver._ssh_exec.assert_called_once_with(
self.server, ['sudo', 'mount'])
self.assertEqual(result, True)
def test_is_device_mounted_true_no_volume_provided(self):
mount_path = '/fake/mount/path'
mounts = "/fake/dev/path on %(path)s type fake" % {'path': mount_path}
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=(mounts, '')))
result = self._driver._is_device_mounted(mount_path, self.server)
self._driver._ssh_exec.assert_called_once_with(
self.server, ['sudo', 'mount'])
self.assertEqual(result, True)
def test_is_device_mounted_false(self):
mount_path = '/fake/mount/path'
volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'}
mounts = "%(dev)s on %(path)s" % {'dev': '/fake',
'path': mount_path}
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=(mounts, '')))
result = self._driver._is_device_mounted(
mount_path, self.server, volume)
self._driver._ssh_exec.assert_called_once_with(
self.server, ['sudo', 'mount'])
self.assertEqual(result, False)
def test_is_device_mounted_false_no_volume_provided(self):
mount_path = '/fake/mount/path'
mounts = "%(path)s" % {'path': 'fake'}
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=(mounts, '')))
self.mock_object(self._driver, '_get_mount_path',
mock.Mock(return_value=mount_path))
result = self._driver._is_device_mounted(mount_path, self.server)
self._driver._ssh_exec.assert_called_once_with(
self.server, ['sudo', 'mount'])
self.assertEqual(result, False)
def test_sync_mount_temp_and_perm_files(self):
self.mock_object(self._driver, '_ssh_exec')
self._driver._sync_mount_temp_and_perm_files(self.server)
self._driver._ssh_exec.assert_has_calls([
mock.call(
self.server,
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]),
mock.call(self.server, ['sudo', 'mount', '-a'])])
def test_sync_mount_temp_and_perm_files_raise_error_on_copy(self):
self.mock_object(
self._driver, '_ssh_exec',
mock.Mock(side_effect=exception.ProcessExecutionError))
self.assertRaises(
exception.ShareBackendException,
self._driver._sync_mount_temp_and_perm_files,
self.server
)
self._driver._ssh_exec.assert_called_once_with(
self.server,
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE])
def test_sync_mount_temp_and_perm_files_raise_error_on_mount(self):
def raise_error_on_mount(*args, **kwargs):
if args[1][1] == 'mount':
raise exception.ProcessExecutionError()
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(side_effect=raise_error_on_mount))
self.assertRaises(
exception.ShareBackendException,
self._driver._sync_mount_temp_and_perm_files,
self.server
)
self._driver._ssh_exec.assert_has_calls([
mock.call(
self.server,
['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]),
mock.call(self.server, ['sudo', 'mount', '-a'])])
def test_get_mount_path(self):
result = self._driver._get_mount_path(self.share)
self.assertEqual(result, os.path.join(CONF.share_mount_path,
self.share['name']))
def test_attach_volume_not_attached(self):
available_volume = fake_volume.FakeVolume()
attached_volume = fake_volume.FakeVolume(status='in-use')
self.mock_object(self._driver.compute_api, 'instance_volume_attach')
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=attached_volume))
result = self._driver._attach_volume(self._context, self.share,
'fake_inst_id', available_volume)
self._driver.compute_api.instance_volume_attach.\
assert_called_once_with(self._context, 'fake_inst_id',
available_volume['id'])
self._driver.volume_api.get.assert_called_once_with(
self._context, attached_volume['id'])
self.assertEqual(result, attached_volume)
def test_attach_volume_attached_correct(self):
fake_server = fake_compute.FakeServer()
attached_volume = fake_volume.FakeVolume(status='in-use')
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[attached_volume]))
result = self._driver._attach_volume(self._context, self.share,
fake_server, attached_volume)
self.assertEqual(result, attached_volume)
def test_attach_volume_attached_incorrect(self):
fake_server = fake_compute.FakeServer()
attached_volume = fake_volume.FakeVolume(status='in-use')
another_volume = fake_volume.FakeVolume(id='fake_id2', status='in-use')
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[another_volume]))
self.assertRaises(exception.ManilaException,
self._driver._attach_volume, self._context,
self.share, fake_server, attached_volume)
@ddt.data(exception.ManilaException, exception.Invalid)
def test_attach_volume_failed_attach(self, side_effect):
fake_server = fake_compute.FakeServer()
available_volume = fake_volume.FakeVolume()
self.mock_object(self._driver.compute_api, 'instance_volume_attach',
mock.Mock(side_effect=side_effect))
self.assertRaises(exception.ManilaException,
self._driver._attach_volume,
self._context, self.share, fake_server,
available_volume)
self.assertEqual(
3, self._driver.compute_api.instance_volume_attach.call_count)
def test_attach_volume_attached_retry_correct(self):
fake_server = fake_compute.FakeServer()
attached_volume = fake_volume.FakeVolume(status='available')
in_use_volume = fake_volume.FakeVolume(status='in-use')
side_effect = [exception.Invalid("Fake"), attached_volume]
attach_mock = mock.Mock(side_effect=side_effect)
self.mock_object(self._driver.compute_api, 'instance_volume_attach',
attach_mock)
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[attached_volume]))
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=in_use_volume))
result = self._driver._attach_volume(self._context, self.share,
fake_server, attached_volume)
self.assertEqual(result, in_use_volume)
self.assertEqual(
2, self._driver.compute_api.instance_volume_attach.call_count)
def test_attach_volume_error(self):
fake_server = fake_compute.FakeServer()
available_volume = fake_volume.FakeVolume()
error_volume = fake_volume.FakeVolume(status='error')
self.mock_object(self._driver.compute_api, 'instance_volume_attach')
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=error_volume))
self.assertRaises(exception.ManilaException,
self._driver._attach_volume,
self._context, self.share,
fake_server, available_volume)
def test_get_volume(self):
volume = fake_volume.FakeVolume(
name=CONF.volume_name_template % self.share['id'])
self.mock_object(self._driver.volume_api, 'get_all',
mock.Mock(return_value=[volume]))
result = self._driver._get_volume(self._context, self.share['id'])
self.assertEqual(result, volume)
self._driver.volume_api.get_all.assert_called_once_with(
self._context, {'all_tenants': True, 'name': volume['name']})
def test_get_volume_with_private_data(self):
volume = fake_volume.FakeVolume()
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=volume))
self.mock_object(self.fake_private_storage, 'get',
mock.Mock(return_value=volume['id']))
result = self._driver._get_volume(self._context, self.share['id'])
self.assertEqual(result, volume)
self._driver.volume_api.get.assert_called_once_with(
self._context, volume['id'])
self.fake_private_storage.get.assert_called_once_with(
self.share['id'], 'volume_id'
)
def test_get_volume_none(self):
vol_name = (
self._driver.configuration.volume_name_template % self.share['id'])
self.mock_object(self._driver.volume_api, 'get_all',
mock.Mock(return_value=[]))
result = self._driver._get_volume(self._context, self.share['id'])
self.assertIsNone(result)
self._driver.volume_api.get_all.assert_called_once_with(
self._context, {'all_tenants': True, 'name': vol_name})
def test_get_volume_error(self):
volume = fake_volume.FakeVolume(
name=CONF.volume_name_template % self.share['id'])
self.mock_object(self._driver.volume_api, 'get_all',
mock.Mock(return_value=[volume, volume]))
self.assertRaises(exception.ManilaException,
self._driver._get_volume,
self._context, self.share['id'])
self._driver.volume_api.get_all.assert_called_once_with(
self._context, {'all_tenants': True, 'name': volume['name']})
def test_get_volume_snapshot(self):
volume_snapshot = fake_volume.FakeVolumeSnapshot(
name=self._driver.configuration.volume_snapshot_name_template %
self.snapshot['id'])
self.mock_object(self._driver.volume_api, 'get_all_snapshots',
mock.Mock(return_value=[volume_snapshot]))
result = self._driver._get_volume_snapshot(self._context,
self.snapshot['id'])
self.assertEqual(result, volume_snapshot)
self._driver.volume_api.get_all_snapshots.assert_called_once_with(
self._context, {'name': volume_snapshot['name']})
def test_get_volume_snapshot_with_private_data(self):
volume_snapshot = fake_volume.FakeVolumeSnapshot()
self.mock_object(self._driver.volume_api, 'get_snapshot',
mock.Mock(return_value=volume_snapshot))
self.mock_object(self.fake_private_storage, 'get',
mock.Mock(return_value=volume_snapshot['id']))
result = self._driver._get_volume_snapshot(self._context,
self.snapshot['id'])
self.assertEqual(result, volume_snapshot)
self._driver.volume_api.get_snapshot.assert_called_once_with(
self._context, volume_snapshot['id'])
self.fake_private_storage.get.assert_called_once_with(
self.snapshot['id'], 'volume_snapshot_id'
)
def test_get_volume_snapshot_none(self):
snap_name = (
self._driver.configuration.volume_snapshot_name_template %
self.share['id'])
self.mock_object(self._driver.volume_api, 'get_all_snapshots',
mock.Mock(return_value=[]))
result = self._driver._get_volume_snapshot(self._context,
self.share['id'])
self.assertIsNone(result)
self._driver.volume_api.get_all_snapshots.assert_called_once_with(
self._context, {'name': snap_name})
def test_get_volume_snapshot_error(self):
volume_snapshot = fake_volume.FakeVolumeSnapshot(
name=self._driver.configuration.volume_snapshot_name_template %
self.snapshot['id'])
self.mock_object(
self._driver.volume_api, 'get_all_snapshots',
mock.Mock(return_value=[volume_snapshot, volume_snapshot]))
self.assertRaises(
exception.ManilaException, self._driver._get_volume_snapshot,
self._context, self.snapshot['id'])
self._driver.volume_api.get_all_snapshots.assert_called_once_with(
self._context, {'name': volume_snapshot['name']})
def test_detach_volume(self):
available_volume = fake_volume.FakeVolume()
attached_volume = fake_volume.FakeVolume(status='in-use')
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=attached_volume))
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[attached_volume]))
self.mock_object(self._driver.compute_api, 'instance_volume_detach')
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=available_volume))
self._driver._detach_volume(self._context, self.share,
self.server['backend_details'])
self._driver.compute_api.instance_volume_detach.\
assert_called_once_with(
self._context,
self.server['backend_details']['instance_id'],
available_volume['id'])
self._driver.volume_api.get.assert_called_once_with(
self._context, available_volume['id'])
def test_detach_volume_detached(self):
available_volume = fake_volume.FakeVolume()
attached_volume = fake_volume.FakeVolume(status='in-use')
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=attached_volume))
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[]))
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=available_volume))
self.mock_object(self._driver.compute_api, 'instance_volume_detach')
self._driver._detach_volume(self._context, self.share,
self.server['backend_details'])
self.assertFalse(self._driver.volume_api.get.called)
self.assertFalse(
self._driver.compute_api.instance_volume_detach.called)
def test_allocate_container(self):
fake_vol = fake_volume.FakeVolume()
self.fake_conf.cinder_volume_type = 'fake_volume_type'
self.mock_object(self._driver.volume_api, 'create',
mock.Mock(return_value=fake_vol))
result = self._driver._allocate_container(self._context, self.share)
self.assertEqual(result, fake_vol)
self._driver.volume_api.create.assert_called_once_with(
self._context,
self.share['size'],
CONF.volume_name_template % self.share['id'],
'',
snapshot=None,
volume_type='fake_volume_type',
availability_zone=self.share['availability_zone'])
def test_allocate_container_with_snapshot(self):
fake_vol = fake_volume.FakeVolume()
fake_vol_snap = fake_volume.FakeVolumeSnapshot()
self.mock_object(self._driver, '_get_volume_snapshot',
mock.Mock(return_value=fake_vol_snap))
self.mock_object(self._driver.volume_api, 'create',
mock.Mock(return_value=fake_vol))
result = self._driver._allocate_container(self._context,
self.share,
self.snapshot)
self.assertEqual(result, fake_vol)
self._driver.volume_api.create.assert_called_once_with(
self._context,
self.share['size'],
CONF.volume_name_template % self.share['id'],
'',
snapshot=fake_vol_snap,
volume_type=None,
availability_zone=self.share['availability_zone'])
def test_allocate_container_error(self):
fake_vol = fake_volume.FakeVolume(status='error')
self.mock_object(self._driver.volume_api, 'create',
mock.Mock(return_value=fake_vol))
self.assertRaises(exception.ManilaException,
self._driver._allocate_container,
self._context,
self.share)
def test_wait_for_available_volume(self):
fake_volume = {'status': 'creating', 'id': 'fake'}
fake_available_volume = {'status': 'available', 'id': 'fake'}
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=fake_available_volume))
actual_result = self._driver._wait_for_available_volume(
fake_volume, 5, "error", "timeout")
self.assertEqual(fake_available_volume, actual_result)
self._driver.volume_api.get.assert_called_once_with(
mock.ANY, fake_volume['id'])
@mock.patch('time.sleep')
def test_wait_for_available_volume_error_extending(self, mock_sleep):
fake_volume = {'status': 'error_extending', 'id': 'fake'}
self.assertRaises(exception.ManilaException,
self._driver._wait_for_available_volume,
fake_volume, 5, 'error', 'timeout')
self.assertFalse(mock_sleep.called)
@mock.patch('time.sleep')
def test_wait_for_extending_volume(self, mock_sleep):
initial_size = 1
expected_size = 2
mock_volume = fake_volume.FakeVolume(status='available',
size=initial_size)
mock_extending_vol = fake_volume.FakeVolume(status='extending',
size=initial_size)
mock_extended_vol = fake_volume.FakeVolume(status='available',
size=expected_size)
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(side_effect=[mock_extending_vol,
mock_extended_vol]))
result = self._driver._wait_for_available_volume(
mock_volume, 5, "error", "timeout",
expected_size=expected_size)
expected_get_count = 2
self.assertEqual(mock_extended_vol, result)
self._driver.volume_api.get.assert_has_calls(
[mock.call(self._driver.admin_context, mock_volume['id'])] *
expected_get_count)
mock_sleep.assert_has_calls([mock.call(1)] * expected_get_count)
@ddt.data(mock.Mock(return_value={'status': 'creating', 'id': 'fake'}),
mock.Mock(return_value={'status': 'error', 'id': 'fake'}))
def test_wait_for_available_volume_invalid(self, volume_get_mock):
fake_volume = {'status': 'creating', 'id': 'fake'}
self.mock_object(self._driver.volume_api, 'get', volume_get_mock)
self.mock_object(time, 'time',
mock.Mock(side_effect=[1.0, 1.33, 1.67, 2.0]))
self.assertRaises(
exception.ManilaException,
self._driver._wait_for_available_volume,
fake_volume, 1, "error", "timeout"
)
def test_deallocate_container(self):
fake_vol = fake_volume.FakeVolume()
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=fake_vol))
self.mock_object(self._driver.volume_api, 'delete')
self.mock_object(self._driver.volume_api, 'get', mock.Mock(
side_effect=exception.VolumeNotFound(volume_id=fake_vol['id'])))
self._driver._deallocate_container(self._context, self.share)
self._driver._get_volume.assert_called_once_with(
self._context, self.share['id'])
self._driver.volume_api.delete.assert_called_once_with(
self._context, fake_vol['id'])
self._driver.volume_api.get.assert_called_once_with(
self._context, fake_vol['id'])
def test_deallocate_container_with_volume_not_found(self):
fake_vol = fake_volume.FakeVolume()
self.mock_object(self._driver, '_get_volume',
mock.Mock(side_effect=exception.VolumeNotFound(
volume_id=fake_vol['id'])))
self.mock_object(self._driver.volume_api, 'delete')
self._driver._deallocate_container(self._context, self.share)
self._driver._get_volume.assert_called_once_with(
self._context, self.share['id'])
self.assertFalse(self._driver.volume_api.delete.called)
def test_create_share_from_snapshot(self):
vol1 = 'fake_vol1'
vol2 = 'fake_vol2'
self._helper_nfs.create_export.return_value = 'fakelocation'
self.mock_object(self._driver, '_allocate_container',
mock.Mock(return_value=vol1))
self.mock_object(self._driver, '_attach_volume',
mock.Mock(return_value=vol2))
self.mock_object(self._driver, '_mount_device')
result = self._driver.create_share_from_snapshot(
self._context,
self.share,
self.snapshot,
share_server=self.server)
self.assertEqual(result, 'fakelocation')
self._driver._allocate_container.assert_called_once_with(
self._driver.admin_context, self.share, self.snapshot)
self._driver._attach_volume.assert_called_once_with(
self._driver.admin_context, self.share,
self.server['backend_details']['instance_id'], vol1)
self._driver._mount_device.assert_called_once_with(
self.share, self.server['backend_details'], vol2)
self._helper_nfs.create_export.assert_called_once_with(
self.server['backend_details'], self.share['name'])
def test_create_share_from_snapshot_invalid_helper(self):
self._driver._helpers = {'CIFS': self._helper_cifs}
self.assertRaises(exception.InvalidShare,
self._driver.create_share_from_snapshot,
self._context, self.share, self.snapshot,
share_server=self.server)
def test_delete_share_no_share_servers_handling(self):
self.mock_object(self._driver, '_deallocate_container')
self.mock_object(
self._driver.service_instance_manager,
'get_common_server', mock.Mock(return_value=self.server))
self.mock_object(
self._driver.service_instance_manager,
'ensure_service_instance', mock.Mock(return_value=False))
CONF.set_default('driver_handles_share_servers', False)
self._driver.delete_share(self._context, self.share)
self._driver.service_instance_manager.get_common_server.\
assert_called_once_with()
self._driver._deallocate_container.assert_called_once_with(
self._driver.admin_context, self.share)
self._driver.service_instance_manager.ensure_service_instance.\
assert_called_once_with(
self._context, self.server['backend_details'])
def test_delete_share(self):
self.mock_object(self._driver, '_unmount_device')
self.mock_object(self._driver, '_detach_volume')
self.mock_object(self._driver, '_deallocate_container')
self._driver.delete_share(
self._context, self.share, share_server=self.server)
self._helper_nfs.remove_export.assert_called_once_with(
self.server['backend_details'], self.share['name'])
self._driver._unmount_device.assert_called_once_with(
self.share, self.server['backend_details'])
self._driver._detach_volume.assert_called_once_with(
self._driver.admin_context, self.share,
self.server['backend_details'])
self._driver._deallocate_container.assert_called_once_with(
self._driver.admin_context, self.share)
self._driver.service_instance_manager.ensure_service_instance.\
assert_called_once_with(
self._context, self.server['backend_details'])
def test_delete_share_without_share_server(self):
self.mock_object(self._driver, '_unmount_device')
self.mock_object(self._driver, '_detach_volume')
self.mock_object(self._driver, '_deallocate_container')
self._driver.delete_share(
self._context, self.share, share_server=None)
self.assertFalse(self._helper_nfs.remove_export.called)
self.assertFalse(self._driver._unmount_device.called)
self.assertFalse(self._driver._detach_volume.called)
self._driver._deallocate_container.assert_called_once_with(
self._driver.admin_context, self.share)
def test_delete_share_without_server_backend_details(self):
self.mock_object(self._driver, '_unmount_device')
self.mock_object(self._driver, '_detach_volume')
self.mock_object(self._driver, '_deallocate_container')
fake_share_server = {
'instance_id': 'fake_instance_id',
'ip': 'fake_ip',
'username': 'fake_username',
'password': 'fake_password',
'pk_path': 'fake_pk_path',
'backend_details': {}
}
self._driver.delete_share(
self._context, self.share, share_server=fake_share_server)
self.assertFalse(self._helper_nfs.remove_export.called)
self.assertFalse(self._driver._unmount_device.called)
self.assertFalse(self._driver._detach_volume.called)
self._driver._deallocate_container.assert_called_once_with(
self._driver.admin_context, self.share)
def test_delete_share_without_server_availability(self):
self.mock_object(self._driver, '_unmount_device')
self.mock_object(self._driver, '_detach_volume')
self.mock_object(self._driver, '_deallocate_container')
self.mock_object(
self._driver.service_instance_manager,
'ensure_service_instance', mock.Mock(return_value=False))
self._driver.delete_share(
self._context, self.share, share_server=self.server)
self.assertFalse(self._helper_nfs.remove_export.called)
self.assertFalse(self._driver._unmount_device.called)
self.assertFalse(self._driver._detach_volume.called)
self._driver._deallocate_container.assert_called_once_with(
self._driver.admin_context, self.share)
self._driver.service_instance_manager.ensure_service_instance.\
assert_called_once_with(
self._context, self.server['backend_details'])
def test_delete_share_invalid_helper(self):
self._driver._helpers = {'CIFS': self._helper_cifs}
self.assertRaises(exception.InvalidShare,
self._driver.delete_share,
self._context, self.share, share_server=self.server)
def test_create_snapshot(self):
fake_vol = fake_volume.FakeVolume()
fake_vol_snap = fake_volume.FakeVolumeSnapshot(share_id=fake_vol['id'])
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=fake_vol))
self.mock_object(self._driver.volume_api, 'create_snapshot_force',
mock.Mock(return_value=fake_vol_snap))
self._driver.create_snapshot(self._context, fake_vol_snap,
share_server=self.server)
self._driver._get_volume.assert_called_once_with(
self._driver.admin_context, fake_vol_snap['share_id'])
self._driver.volume_api.create_snapshot_force.assert_called_once_with(
self._context,
fake_vol['id'],
CONF.volume_snapshot_name_template % fake_vol_snap['id'],
''
)
def test_delete_snapshot(self):
fake_vol_snap = fake_volume.FakeVolumeSnapshot()
fake_vol_snap2 = {'id': 'fake_vol_snap2'}
self.mock_object(self._driver, '_get_volume_snapshot',
mock.Mock(return_value=fake_vol_snap2))
self.mock_object(self._driver.volume_api, 'delete_snapshot')
self.mock_object(
self._driver.volume_api, 'get_snapshot',
mock.Mock(side_effect=exception.VolumeSnapshotNotFound(
snapshot_id=fake_vol_snap['id'])))
self._driver.delete_snapshot(self._context, fake_vol_snap,
share_server=self.server)
self._driver._get_volume_snapshot.assert_called_once_with(
self._driver.admin_context, fake_vol_snap['id'])
self._driver.volume_api.delete_snapshot.assert_called_once_with(
self._driver.admin_context, fake_vol_snap2['id'])
self._driver.volume_api.get_snapshot.assert_called_once_with(
self._driver.admin_context, fake_vol_snap2['id'])
def test_ensure_share(self):
vol1 = 'fake_vol1'
vol2 = 'fake_vol2'
self._helper_nfs.create_export.return_value = 'fakelocation'
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=vol1))
self.mock_object(self._driver, '_attach_volume',
mock.Mock(return_value=vol2))
self.mock_object(self._driver, '_mount_device')
self._driver.ensure_share(
self._context, self.share, share_server=self.server)
self._driver._get_volume.assert_called_once_with(
self._context, self.share['id'])
self._driver._attach_volume.assert_called_once_with(
self._context, self.share,
self.server['backend_details']['instance_id'], vol1)
self._driver._mount_device.assert_called_once_with(
self.share, self.server['backend_details'], vol2)
self._helper_nfs.create_export.assert_called_once_with(
self.server['backend_details'], self.share['name'], recreate=True)
def test_ensure_share_volume_is_absent(self):
self.mock_object(
self._driver, '_get_volume', mock.Mock(return_value=None))
self.mock_object(self._driver, '_attach_volume')
self._driver.ensure_share(
self._context, self.share, share_server=self.server)
self._driver._get_volume.assert_called_once_with(
self._context, self.share['id'])
self.assertFalse(self._driver._attach_volume.called)
def test_ensure_share_invalid_helper(self):
self._driver._helpers = {'CIFS': self._helper_cifs}
self.assertRaises(exception.InvalidShare, self._driver.ensure_share,
self._context, self.share, share_server=self.server)
@ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO)
def test_allow_access(self, access_level):
access = {
'access_type': 'ip',
'access_to': 'fake_dest',
'access_level': access_level,
}
self._driver.allow_access(
self._context, self.share, access, share_server=self.server)
self._driver._helpers[self.share['share_proto']].\
allow_access.assert_called_once_with(
self.server['backend_details'], self.share['name'],
access['access_type'], access['access_level'],
access['access_to'])
def test_allow_access_unsupported(self):
access = {
'access_type': 'ip',
'access_to': 'fake_dest',
'access_level': 'fakefoobar',
}
self.assertRaises(
exception.InvalidShareAccessLevel,
self._driver.allow_access,
self._context, self.share, access, share_server=self.server)
def test_deny_access(self):
access = 'fake_access'
self._driver.deny_access(
self._context, self.share, access, share_server=self.server)
self._driver._helpers[
self.share['share_proto']].deny_access.assert_called_once_with(
self.server['backend_details'], self.share['name'], access)
@ddt.data(fake_share.fake_share(),
fake_share.fake_share(share_proto='NFSBOGUS'),
fake_share.fake_share(share_proto='CIFSBOGUS'))
def test__get_helper_with_wrong_proto(self, share):
self.assertRaises(exception.InvalidShare,
self._driver._get_helper, share)
def test__setup_server(self):
sim = self._driver.instance_manager
net_info = {
'server_id': 'fake',
'neutron_net_id': 'fake-net-id',
'neutron_subnet_id': 'fake-subnet-id',
}
self._driver.setup_server(net_info)
sim.set_up_service_instance.assert_called_once_with(
self._context, net_info)
def test__setup_server_revert(self):
def raise_exception(*args, **kwargs):
raise exception.ServiceInstanceException
net_info = {'server_id': 'fake',
'neutron_net_id': 'fake-net-id',
'neutron_subnet_id': 'fake-subnet-id'}
self.mock_object(self._driver.service_instance_manager,
'set_up_service_instance',
mock.Mock(side_effect=raise_exception))
self.assertRaises(exception.ServiceInstanceException,
self._driver.setup_server,
net_info)
def test__teardown_server(self):
server_details = {
'instance_id': 'fake_instance_id',
'subnet_id': 'fake_subnet_id',
'router_id': 'fake_router_id',
}
self._driver.teardown_server(server_details)
self._driver.service_instance_manager.delete_service_instance.\
assert_called_once_with(
self._driver.admin_context, server_details)
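# The next three tests exercise the SSH connection cache used by _ssh_exec:
# a new SSHPool and connection are created when nothing is cached, a cached
# connection is reused while its transport is still active, and a stale
# connection is removed from the pool and recreated otherwise.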
def test_ssh_exec_connection_not_exist(self):
ssh_output = 'fake_ssh_output'
cmd = ['fake', 'command']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(return_value=True)
ssh_pool = mock.Mock()
ssh_pool.create = mock.Mock(return_value=ssh)
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
self.mock_object(processutils, 'ssh_execute',
mock.Mock(return_value=ssh_output))
self._driver.ssh_connections = {}
result = self._driver._ssh_exec(self.server, cmd)
utils.SSHPool.assert_called_once_with(
self.server['ip'], 22, None, self.server['username'],
self.server['password'], self.server['pk_path'], max_size=1)
ssh_pool.create.assert_called_once_with()
processutils.ssh_execute.assert_called_once_with(ssh, 'fake command')
ssh.get_transport().is_active.assert_called_once_with()
self.assertEqual(
self._driver.ssh_connections,
{self.server['instance_id']: (ssh_pool, ssh)}
)
self.assertEqual(ssh_output, result)
def test_ssh_exec_connection_exist(self):
ssh_output = 'fake_ssh_output'
cmd = ['fake', 'command']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(side_effect=lambda: True)
ssh_pool = mock.Mock()
self.mock_object(processutils, 'ssh_execute',
mock.Mock(return_value=ssh_output))
self._driver.ssh_connections = {
self.server['instance_id']: (ssh_pool, ssh)
}
result = self._driver._ssh_exec(self.server, cmd)
processutils.ssh_execute.assert_called_once_with(ssh, 'fake command')
ssh.get_transport().is_active.assert_called_once_with()
self.assertEqual(
self._driver.ssh_connections,
{self.server['instance_id']: (ssh_pool, ssh)}
)
self.assertEqual(ssh_output, result)
def test_ssh_exec_connection_recreation(self):
ssh_output = 'fake_ssh_output'
cmd = ['fake', 'command']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(side_effect=lambda: False)
ssh_pool = mock.Mock()
ssh_pool.create = mock.Mock(side_effect=lambda: ssh)
ssh_pool.remove = mock.Mock()
self.mock_object(processutils, 'ssh_execute',
mock.Mock(return_value=ssh_output))
self._driver.ssh_connections = {
self.server['instance_id']: (ssh_pool, ssh)
}
result = self._driver._ssh_exec(self.server, cmd)
processutils.ssh_execute.assert_called_once_with(ssh, 'fake command')
ssh.get_transport().is_active.assert_called_once_with()
ssh_pool.create.assert_called_once_with()
ssh_pool.remove.assert_called_once_with(ssh)
self.assertEqual(
self._driver.ssh_connections,
{self.server['instance_id']: (ssh_pool, ssh)}
)
self.assertEqual(ssh_output, result)
def test_get_share_stats_refresh_false(self):
self._driver._stats = {'fake_key': 'fake_value'}
result = self._driver.get_share_stats(False)
self.assertEqual(self._driver._stats, result)
def test_get_share_stats_refresh_true(self):
fake_stats = {'fake_key': 'fake_value'}
self._driver._stats = fake_stats
expected_keys = [
'QoS_support', 'driver_version', 'share_backend_name',
'free_capacity_gb', 'total_capacity_gb',
'driver_handles_share_servers',
'reserved_percentage', 'vendor_name', 'storage_protocol',
]
result = self._driver.get_share_stats(True)
self.assertNotEqual(fake_stats, result)
for key in expected_keys:
self.assertIn(key, result)
self.assertEqual(True, result['driver_handles_share_servers'])
self.assertEqual('Open Source', result['vendor_name'])
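# Helper for the manage_existing() tests below: it forces the
# driver_handles_share_servers=False path and stubs out the share-type
# extra-specs lookup, the device-mount check and the service instance
# manager's get_common_server(), so each test can focus on a single
# success or failure scenario.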
def _setup_manage_mocks(self,
get_share_type_extra_specs='False',
is_device_mounted=True,
server_details=None):
CONF.set_default('driver_handles_share_servers', False)
self.mock_object(share_types, 'get_share_type_extra_specs',
mock.Mock(return_value=get_share_type_extra_specs))
self.mock_object(self._driver, '_is_device_mounted',
mock.Mock(return_value=is_device_mounted))
self.mock_object(self._driver, 'service_instance_manager')
server = {'backend_details': server_details}
self.mock_object(self._driver.service_instance_manager,
'get_common_server',
mock.Mock(return_value=server))
def test_manage_invalid_protocol(self):
share = {'share_proto': 'fake_proto'}
self._setup_manage_mocks()
self.assertRaises(exception.InvalidShare,
self._driver.manage_existing, share, {})
def test_manage_not_mounted_share(self):
share = get_fake_manage_share()
fake_path = '/foo/bar'
self._setup_manage_mocks(is_device_mounted=False)
self.mock_object(
self._driver._helpers[share['share_proto']],
'get_share_path_by_export_location',
mock.Mock(return_value=fake_path))
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, share, {})
self.assertEqual(
1,
self._driver.service_instance_manager.get_common_server.call_count)
self._driver._is_device_mounted.assert_called_once_with(
fake_path, None)
self._driver._helpers[share['share_proto']].\
get_share_path_by_export_location.assert_called_once_with(
None, share['export_locations'][0]['path'])
def test_manage_share_not_attached_to_cinder_volume_invalid_size(self):
share = get_fake_manage_share()
server_details = {}
fake_path = '/foo/bar'
self._setup_manage_mocks(server_details=server_details)
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=None))
error = exception.ManageInvalidShare(reason="fake")
self.mock_object(
self._driver, '_get_mounted_share_size',
mock.Mock(side_effect=error))
self.mock_object(
self._driver._helpers[share['share_proto']],
'get_share_path_by_export_location',
mock.Mock(return_value=fake_path))
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, share, {})
self._driver._get_mounted_share_size.assert_called_once_with(
fake_path, server_details)
self._driver._helpers[share['share_proto']].\
get_share_path_by_export_location.assert_called_once_with(
server_details, share['export_locations'][0]['path'])
def test_manage_share_not_attached_to_cinder_volume(self):
share = get_fake_manage_share()
share_size = "fake"
fake_path = '/foo/bar'
fake_exports = ['foo', 'bar']
server_details = {}
self._setup_manage_mocks(server_details=server_details)
self.mock_object(self._driver, '_get_volume')
self.mock_object(self._driver, '_get_mounted_share_size',
mock.Mock(return_value=share_size))
self.mock_object(
self._driver._helpers[share['share_proto']],
'get_share_path_by_export_location',
mock.Mock(return_value=fake_path))
self.mock_object(
self._driver._helpers[share['share_proto']],
'get_exports_for_share',
mock.Mock(return_value=fake_exports))
result = self._driver.manage_existing(share, {})
self.assertEqual(
{'size': share_size, 'export_locations': fake_exports}, result)
self._driver._helpers[share['share_proto']].get_exports_for_share.\
assert_called_once_with(
server_details, share['export_locations'][0]['path'])
self._driver._helpers[share['share_proto']].\
get_share_path_by_export_location.assert_called_once_with(
server_details, share['export_locations'][0]['path'])
self._driver._get_mounted_share_size.assert_called_once_with(
fake_path, server_details)
self.assertFalse(self._driver._get_volume.called)
def test_manage_share_attached_to_cinder_volume_not_found(self):
share = get_fake_manage_share()
server_details = {}
driver_options = {'volume_id': 'fake'}
self._setup_manage_mocks(server_details=server_details)
self.mock_object(
self._driver.volume_api, 'get',
mock.Mock(side_effect=exception.VolumeNotFound(volume_id="fake"))
)
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, share, driver_options)
self._driver.volume_api.get.assert_called_once_with(
mock.ANY, driver_options['volume_id'])
def test_manage_share_attached_to_cinder_volume_not_mounted_to_srv(self):
share = get_fake_manage_share()
server_details = {'instance_id': 'fake'}
driver_options = {'volume_id': 'fake'}
volume = {'id': 'fake'}
self._setup_manage_mocks(server_details=server_details)
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=volume))
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[]))
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, share, driver_options)
self._driver.volume_api.get.assert_called_once_with(
mock.ANY, driver_options['volume_id'])
self._driver.compute_api.instance_volumes_list.assert_called_once_with(
mock.ANY, server_details['instance_id'])
def test_manage_share_attached_to_cinder_volume(self):
share = get_fake_manage_share()
fake_size = 'foobar'
fake_exports = ['foo', 'bar']
server_details = {'instance_id': 'fake'}
driver_options = {'volume_id': 'fake'}
volume = {'id': 'fake', 'name': 'fake_volume_1', 'size': fake_size}
self._setup_manage_mocks(server_details=server_details)
self.mock_object(self._driver.volume_api, 'get',
mock.Mock(return_value=volume))
self._driver.volume_api.update = mock.Mock()
fake_volume = mock.Mock()
fake_volume.id = 'fake'
self.mock_object(self._driver.compute_api, 'instance_volumes_list',
mock.Mock(return_value=[fake_volume]))
self.mock_object(
self._driver._helpers[share['share_proto']],
'get_exports_for_share',
mock.Mock(return_value=fake_exports))
result = self._driver.manage_existing(share, driver_options)
self.assertEqual(
{'size': fake_size, 'export_locations': fake_exports}, result)
self._driver._helpers[share['share_proto']].get_exports_for_share.\
assert_called_once_with(
server_details, share['export_locations'][0]['path'])
expected_volume_update = {
'name': self._driver._get_volume_name(share['id'])
}
self._driver.volume_api.update.assert_called_once_with(
mock.ANY, volume['id'], expected_volume_update)
self.fake_private_storage.update.assert_called_once_with(
share['id'], {'volume_id': volume['id']}
)
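# _get_mounted_share_size() is expected to parse a df-style listing and
# return the mounted filesystem size as whole GiB (here every size-related
# column is 1G, so the exact column picked does not affect the assertion);
# outputs without a usable data line must be rejected, as the
# invalid-output test below verifies.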
def test_get_mounted_share_size(self):
output = ("Filesystem blocks Used Available Capacity Mounted on\n"
"/dev/fake 1G 1G 1G 4% /shares/share-fake")
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=(output, '')))
actual_result = self._driver._get_mounted_share_size('/fake/path', {})
self.assertEqual(1, actual_result)
@ddt.data("fake\nfake\n", "fake", "fake\n")
def test_get_mounted_share_size_invalid_output(self, output):
self.mock_object(self._driver, '_ssh_exec',
mock.Mock(return_value=(output, '')))
self.assertRaises(exception.ManageInvalidShare,
self._driver._get_mounted_share_size,
'/fake/path', {})
def test_get_consumed_space(self):
mount_path = "fake_path"
server_details = {}
index = 2
valid_result = 1
self.mock_object(self._driver, '_get_mount_stats_by_index',
mock.Mock(return_value=valid_result * 1024))
actual_result = self._driver._get_consumed_space(
mount_path, server_details)
self.assertEqual(valid_result, actual_result)
self._driver._get_mount_stats_by_index.assert_called_once_with(
mount_path, server_details, index, block_size='M'
)
def test_get_consumed_space_invalid(self):
self.mock_object(
self._driver,
'_get_mount_stats_by_index',
mock.Mock(side_effect=exception.ManilaException("fake"))
)
self.assertRaises(
exception.InvalidShare,
self._driver._get_consumed_space,
"fake", "fake"
)
def test_extend_share(self):
fake_volume = "fake"
fake_share = {
'id': 'fake',
'share_proto': 'NFS',
'name': 'test_share',
}
new_size = 123
srv_details = self.server['backend_details']
self.mock_object(
self._driver.service_instance_manager,
'get_common_server',
mock.Mock(return_value=self.server)
)
self.mock_object(self._driver, '_unmount_device')
self.mock_object(self._driver, '_detach_volume')
self.mock_object(self._driver, '_extend_volume')
self.mock_object(self._driver, '_attach_volume')
self.mock_object(self._driver, '_mount_device')
self.mock_object(self._driver, '_resize_filesystem')
self.mock_object(
self._driver, '_get_volume',
mock.Mock(return_value=fake_volume)
)
CONF.set_default('driver_handles_share_servers', False)
self._driver.extend_share(fake_share, new_size)
self.assertTrue(
self._driver.service_instance_manager.get_common_server.called)
self._driver._unmount_device.assert_called_once_with(
fake_share, srv_details)
self._driver._detach_volume.assert_called_once_with(
mock.ANY, fake_share, srv_details)
self._driver._get_volume.assert_called_once_with(
mock.ANY, fake_share['id'])
self._driver._extend_volume.assert_called_once_with(
mock.ANY, fake_volume, new_size)
self._driver._attach_volume.assert_called_once_with(
mock.ANY, fake_share, srv_details['instance_id'], mock.ANY)
self._helper_nfs.disable_access_for_maintenance.\
assert_called_once_with(srv_details, 'test_share')
self._helper_nfs.restore_access_after_maintenance.\
assert_called_once_with(srv_details, 'test_share')
self.assertTrue(self._driver._resize_filesystem.called)
def test_extend_volume(self):
fake_volume = {'id': 'fake'}
new_size = 123
self.mock_object(self._driver.volume_api, 'extend')
self.mock_object(self._driver, '_wait_for_available_volume')
self._driver._extend_volume(self._context, fake_volume, new_size)
self._driver.volume_api.extend.assert_called_once_with(
self._context, fake_volume['id'], new_size
)
self._driver._wait_for_available_volume.assert_called_once_with(
fake_volume, mock.ANY, msg_timeout=mock.ANY, msg_error=mock.ANY,
expected_size=new_size
)
def test_resize_filesystem(self):
fake_server_details = {'fake': 'fake'}
fake_volume = {'mountpoint': '/dev/fake'}
self.mock_object(self._driver, '_ssh_exec')
self._driver._resize_filesystem(
fake_server_details, fake_volume, new_size=123)
self._driver._ssh_exec.assert_any_call(
fake_server_details, ['sudo', 'fsck', '-pf', '/dev/fake'])
self._driver._ssh_exec.assert_any_call(
fake_server_details,
['sudo', 'resize2fs', '/dev/fake', "%sG" % 123]
)
self.assertEqual(2, self._driver._ssh_exec.call_count)
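# The ddt cases below check how _resize_filesystem() maps resize2fs
# failures: a "New size smaller than minimum" stderr is expected to be
# reported as exception.Invalid, while any other ProcessExecutionError
# should surface as a generic exception.ManilaException.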
@ddt.data(
{
'source': processutils.ProcessExecutionError(
stderr="resize2fs: New size smaller than minimum (123456)"),
'target': exception.Invalid
},
{
'source': processutils.ProcessExecutionError(stderr="fake_error"),
'target': exception.ManilaException
}
)
@ddt.unpack
def test_resize_filesystem_invalid_new_size(self, source, target):
fake_server_details = {'fake': 'fake'}
fake_volume = {'mountpoint': '/dev/fake'}
ssh_mock = mock.Mock(side_effect=["fake", source])
self.mock_object(self._driver, '_ssh_exec', ssh_mock)
self.assertRaises(
target,
self._driver._resize_filesystem,
fake_server_details, fake_volume, new_size=123
)
def test_shrink_share_invalid_size(self):
fake_share = {'id': 'fake', 'export_locations': [{'path': 'test'}]}
new_size = 123
self.mock_object(
self._driver.service_instance_manager,
'get_common_server',
mock.Mock(return_value=self.server)
)
self.mock_object(self._driver, '_get_helper')
self.mock_object(self._driver, '_get_consumed_space',
mock.Mock(return_value=200))
CONF.set_default('driver_handles_share_servers', False)
self.assertRaises(
exception.ShareShrinkingPossibleDataLoss,
self._driver.shrink_share,
fake_share,
new_size
)
self._driver._get_helper.assert_called_once_with(fake_share)
self._driver._get_consumed_space.assert_called_once_with(
mock.ANY, self.server['backend_details'])
def _setup_shrink_mocks(self):
share = {'id': 'fake', 'export_locations': [{'path': 'test'}],
'name': 'fake'}
volume = {'id': 'fake'}
new_size = 123
server_details = self.server['backend_details']
self.mock_object(
self._driver.service_instance_manager,
'get_common_server',
mock.Mock(return_value=self.server)
)
helper = mock.Mock()
self.mock_object(self._driver, '_get_helper',
mock.Mock(return_value=helper))
self.mock_object(self._driver, '_get_consumed_space',
mock.Mock(return_value=100))
self.mock_object(self._driver, '_get_volume',
mock.Mock(return_value=volume))
self.mock_object(self._driver, '_unmount_device')
self.mock_object(self._driver, '_mount_device')
CONF.set_default('driver_handles_share_servers', False)
return share, volume, new_size, server_details, helper
@ddt.data({'source': exception.Invalid("fake"),
'target': exception.ShareShrinkingPossibleDataLoss},
{'source': exception.ManilaException("fake"),
'target': exception.Invalid})
@ddt.unpack
def test_shrink_share_error_on_resize_fs(self, source, target):
share, vol, size, server_details, _ = self._setup_shrink_mocks()
resize_mock = mock.Mock(side_effect=source)
self.mock_object(self._driver, '_resize_filesystem', resize_mock)
self.assertRaises(target, self._driver.shrink_share, share, size)
resize_mock.assert_called_once_with(server_details, vol,
new_size=size)
def test_shrink_share(self):
share, vol, size, server_details, helper = self._setup_shrink_mocks()
self.mock_object(self._driver, '_resize_filesystem')
self._driver.shrink_share(share, size)
self._driver._get_helper.assert_called_once_with(share)
self._driver._get_consumed_space.assert_called_once_with(
mock.ANY, server_details)
self._driver._get_volume.assert_called_once_with(mock.ANY, share['id'])
self._driver._unmount_device.assert_called_once_with(share,
server_details)
self._driver._resize_filesystem.assert_called_once_with(
server_details, vol, new_size=size)
self._driver._mount_device.assert_called_once_with(
share, server_details, vol)
self.assertTrue(helper.disable_access_for_maintenance.called)
self.assertTrue(helper.restore_access_after_maintenance.called)
@ddt.data({'share_servers': [], 'result': None},
{'share_servers': None, 'result': None},
{'share_servers': ['fake'], 'result': 'fake'},
{'share_servers': ['fake', 'test'], 'result': 'fake'})
@ddt.unpack
def tests_choose_share_server_compatible_with_share(self, share_servers,
result):
fake_share = "fake"
actual_result = self._driver.choose_share_server_compatible_with_share(
self._context, share_servers, fake_share
)
self.assertEqual(result, actual_result)
@ddt.data({'consistency_group': {'share_server_id': 'fake'},
'result': {'id': 'fake'}},
{'consistency_group': None, 'result': {'id': 'fake'}},
{'consistency_group': {'share_server_id': 'test'},
'result': {'id': 'test'}})
@ddt.unpack
def tests_choose_share_server_compatible_with_share_and_cg(
self, consistency_group, result):
share_servers = [{'id': 'fake'}, {'id': 'test'}]
fake_share = "fake"
actual_result = self._driver.choose_share_server_compatible_with_share(
self._context, share_servers, fake_share,
consistency_group=consistency_group
)
self.assertEqual(result, actual_result)
def test_create_consistency_group(self):
FAKE_SNAP_DICT = get_fake_snap_dict()
result = self._driver.create_consistency_group(
self._context, FAKE_SNAP_DICT, share_server=self.server)
self.assertEqual(1, self.mock_debug_log.call_count)
self.assertEqual(1, self.mock_warning_log.call_count)
self.assertIsNone(result)
def test_delete_consistency_group(self):
FAKE_SNAP_DICT = get_fake_snap_dict()
result = self._driver.delete_consistency_group(
self._context, FAKE_SNAP_DICT, share_server=self.server)
self.assertEqual(1, self.mock_debug_log.call_count)
self.assertIsNone(result)
def test_create_cgsnapshot_no_cg_members(self):
FAKE_SNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[])
mock_snapshot_creation = self.mock_object(generic.GenericShareDriver,
'create_snapshot')
result = self._driver.create_cgsnapshot(
self._context, FAKE_SNAP_DICT, share_server=self.server)
self.assertEqual(1, self.mock_debug_log.call_count)
self.assertEqual(2, self.mock_warning_log.call_count)
self.assertFalse(mock_snapshot_creation.called)
self.assertEqual((None, None), result)
@ddt.data(
{
'delete_snap_side_effect': None,
'expected_error_log_call_count': 0,
},
{
'delete_snap_side_effect': exception.ManilaException,
'expected_error_log_call_count': 1,
}
)
@ddt.unpack
def test_create_cgsnapshot_manila_exception_on_create_and_delete(
self, delete_snap_side_effect, expected_error_log_call_count):
FAKE_SNAP_DICT = get_fake_snap_dict()
# Append another fake share
FAKE_SHARE = dict(FAKE_SNAP_DICT['cgsnapshot_members'][0])
FAKE_SNAP_DICT['cgsnapshot_members'].append(FAKE_SHARE)
self.mock_object(generic.GenericShareDriver,
'create_snapshot',
mock.Mock(side_effect=[
None,
exception.ManilaException,
]))
self.mock_object(generic.GenericShareDriver,
'delete_snapshot',
mock.Mock(side_effect=delete_snap_side_effect))
self.assertRaises(exception.ManilaException,
self._driver.create_cgsnapshot,
self._context, FAKE_SNAP_DICT,
share_server=self.server)
self.assertEqual(2, self.mock_debug_log.call_count)
self.assertEqual(1, self.mock_warning_log.call_count)
self.assertEqual(1, self.mock_exception_log.call_count)
self.assertEqual(expected_error_log_call_count,
self.mock_error_log.call_count)
def test_create_cgsnapshot(self):
FAKE_SNAP_DICT = get_fake_snap_dict()
FAKE_SHARE_SNAPSHOT = {
'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f',
'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8',
}
mock_snapshot_creation = self.mock_object(generic.GenericShareDriver,
'create_snapshot')
result = self._driver.create_cgsnapshot(
self._context, FAKE_SNAP_DICT, share_server=self.server)
mock_snapshot_creation.assert_called_once_with(self._context,
FAKE_SHARE_SNAPSHOT,
self.server)
self.assertEqual(2, self.mock_debug_log.call_count)
self.assertEqual(1, self.mock_warning_log.call_count)
self.assertFalse(self.mock_error_log.called)
self.assertEqual((None, None), result)
def test_delete_cgsnapshot_manila_exception(self):
FAKE_SNAP_DICT = get_fake_snap_dict()
self.mock_object(generic.GenericShareDriver,
'delete_snapshot',
mock.Mock(side_effect=exception.ManilaException))
self.assertRaises(exception.ManilaException,
self._driver.delete_cgsnapshot,
self._context, FAKE_SNAP_DICT,
share_server=self.server)
self.assertEqual(1, self.mock_error_log.call_count)
def test_delete_cgsnapshot(self):
FAKE_SNAP_DICT = get_fake_snap_dict()
FAKE_SHARE_SNAPSHOT = {
'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f',
'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8',
}
mock_snapshot_creation = self.mock_object(generic.GenericShareDriver,
'delete_snapshot')
result = self._driver.delete_cgsnapshot(
self._context, FAKE_SNAP_DICT, share_server=self.server)
mock_snapshot_creation.assert_called_once_with(self._context,
FAKE_SHARE_SNAPSHOT,
self.server)
self.assertEqual(2, self.mock_debug_log.call_count)
self.assertEqual((None, None), result)
def test_create_consistency_group_from_cgsnapshot_no_members(self):
FAKE_CG_DICT = get_fake_cg_dict()
FAKE_CGSNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[])
mock_share_creation = self.mock_object(generic.GenericShareDriver,
'create_share_from_snapshot')
result = self._driver.create_consistency_group_from_cgsnapshot(
self._context, FAKE_CG_DICT, FAKE_CGSNAP_DICT,
share_server=self.server)
self.assertFalse(self.mock_debug_log.called)
self.assertFalse(mock_share_creation.called)
self.assertEqual((None, None), result)
def test_create_consistency_group_from_cgsnapshot(self):
FAKE_CG_DICT = get_fake_cg_dict()
FAKE_CGSNAP_DICT = get_fake_snap_dict()
FAKE_COLLATED_INFO = get_fake_collated_cg_snap_info()
FAKE_SHARE_UPDATE_LIST = [
{
'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8',
'export_locations': 'xyzzy',
}
]
self.mock_object(generic.GenericShareDriver,
'_collate_cg_snapshot_info',
mock.Mock(return_value=FAKE_COLLATED_INFO))
mock_share_creation = self.mock_object(generic.GenericShareDriver,
'create_share_from_snapshot',
mock.Mock(return_value='xyzzy'))
result = self._driver.create_consistency_group_from_cgsnapshot(
self._context, FAKE_CG_DICT, FAKE_CGSNAP_DICT,
share_server=self.server)
self.assertEqual((None, FAKE_SHARE_UPDATE_LIST), result)
self.assertEqual(1, self.mock_debug_log.call_count)
mock_share_creation.assert_called_once_with(
self._context,
FAKE_COLLATED_INFO[0]['share'],
FAKE_COLLATED_INFO[0]['snapshot'],
share_server=self.server
)
def test_collate_cg_snapshot_info_invalid_cg(self):
FAKE_CG_DICT = get_fake_cg_dict()
FAKE_CGSNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[])
self.assertRaises(exception.InvalidConsistencyGroup,
self._driver._collate_cg_snapshot_info,
FAKE_CG_DICT,
FAKE_CGSNAP_DICT)
def test_collate_cg_snapshot(self):
FAKE_CG_DICT = get_fake_cg_dict()
FAKE_CGSNAP_DICT = get_fake_snap_dict()
FAKE_COLLATED_INFO = get_fake_collated_cg_snap_info()
result = self._driver._collate_cg_snapshot_info(
FAKE_CG_DICT, FAKE_CGSNAP_DICT)
self.assertEqual(FAKE_COLLATED_INFO, result)
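# Module-level helper decorated with generic.ensure_server: it simply
# returns whatever share_server the decorator resolves, so the test case
# below can exercise the decorator's server-handling logic directly, with
# driver_instance standing in for the driver's "self".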
@generic.ensure_server
def fake(driver_instance, context, share_server=None):
return share_server
@ddt.ddt
class GenericDriverEnsureServerTestCase(test.TestCase):
def setUp(self):
super(GenericDriverEnsureServerTestCase, self).setUp()
self._context = context.get_admin_context()
self.server = {'id': 'fake_id', 'backend_details': {'foo': 'bar'}}
self.dhss_false = type(
'Fake', (object,), {'driver_handles_share_servers': False})
self.dhss_true = type(
'Fake', (object,), {'driver_handles_share_servers': True})
def test_share_servers_are_not_handled_server_not_provided(self):
self.dhss_false.service_instance_manager = mock.Mock()
self.dhss_false.service_instance_manager.get_common_server = (
mock.Mock(return_value=self.server))
self.dhss_false.service_instance_manager.ensure_service_instance = (
mock.Mock(return_value=True))
actual = fake(self.dhss_false, self._context)
self.assertEqual(self.server, actual)
self.dhss_false.service_instance_manager.\
get_common_server.assert_called_once_with()
self.dhss_false.service_instance_manager.ensure_service_instance.\
assert_called_once_with(
self._context, self.server['backend_details'])
@ddt.data({'id': 'without_details'},
{'id': 'with_details', 'backend_details': {'foo': 'bar'}})
def test_share_servers_are_not_handled_server_provided(self, server):
self.assertRaises(
exception.ManilaException,
fake, self.dhss_false, self._context, share_server=server)
def test_share_servers_are_handled_server_provided(self):
self.dhss_true.service_instance_manager = mock.Mock()
self.dhss_true.service_instance_manager.ensure_service_instance = (
mock.Mock(return_value=True))
actual = fake(self.dhss_true, self._context, share_server=self.server)
self.assertEqual(self.server, actual)
self.dhss_true.service_instance_manager.ensure_service_instance.\
assert_called_once_with(
self._context, self.server['backend_details'])
def test_share_servers_are_handled_invalid_server_provided(self):
server = {'id': 'without_details'}
self.assertRaises(
exception.ManilaException,
fake, self.dhss_true, self._context, share_server=server)
def test_share_servers_are_handled_server_not_provided(self):
self.assertRaises(
exception.ManilaException, fake, self.dhss_true, self._context)
@ddt.ddt
class NFSHelperTestCase(test.TestCase):
"""Test case for NFS helper of generic driver."""
def setUp(self):
super(NFSHelperTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self.fake_conf = manila.share.configuration.Configuration(None)
self._ssh_exec = mock.Mock(return_value=('', ''))
self._execute = mock.Mock(return_value=('', ''))
self._helper = generic.NFSHelper(self._execute, self._ssh_exec,
self.fake_conf)
ip = '10.254.0.3'
self.server = fake_compute.FakeServer(
ip=ip, public_address=ip, instance_id='fake_instance_id')
self.share_name = 'fake_share_name'
def test_create_export(self):
ret = self._helper.create_export(self.server, self.share_name)
expected_location = ':'.join([self.server['public_address'],
os.path.join(CONF.share_mount_path,
self.share_name)])
self.assertEqual(ret, expected_location)
@ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO)
def test_allow_access(self, data):
self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
self._helper.allow_access(
self.server, self.share_name, 'ip', data, '10.0.0.2')
local_path = os.path.join(CONF.share_mount_path, self.share_name)
self._ssh_exec.assert_has_calls([
mock.call(self.server, ['sudo', 'exportfs']),
mock.call(self.server, ['sudo', 'exportfs', '-o',
'%s,no_subtree_check' % data,
':'.join(['10.0.0.2', local_path])])
])
self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with(
self.server)
def test_allow_access_no_ip(self):
self.assertRaises(
exception.InvalidShareAccess,
self._helper.allow_access,
self.server, self.share_name,
'fake_type', 'fake_level', 'fake_rule')
@ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO)
def test_deny_access(self, data):
self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
local_path = os.path.join(CONF.share_mount_path, self.share_name)
access = dict(
access_to='10.0.0.2', access_type='ip', access_level=data)
self._helper.deny_access(self.server, self.share_name, access)
export_string = ':'.join(['10.0.0.2', local_path])
expected_exec = ['sudo', 'exportfs', '-u', export_string]
self._ssh_exec.assert_called_once_with(self.server, expected_exec)
self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with(
self.server)
def test_sync_nfs_temp_and_perm_files(self):
self._helper._sync_nfs_temp_and_perm_files(self.server)
self._helper._ssh_exec.assert_called_once_with(self.server, mock.ANY)
@ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.88:/foo/quuz')
def test_get_exports_for_share(self, export_location):
server = dict(public_address='1.2.3.4')
result = self._helper.get_exports_for_share(server, export_location)
path = export_location.split(':')[-1]
self.assertEqual([':'.join([server['public_address'], path])], result)
@ddt.data(
{'public_address_with_suffix': 'foo'},
{'with_prefix_public_address': 'bar'},
{'with_prefix_public_address_and_with_suffix': 'quuz'}, {})
def test_get_exports_for_share_with_error(self, server):
export_location = '1.2.3.4:/foo/bar'
self.assertRaises(
exception.ManilaException,
self._helper.get_exports_for_share, server, export_location)
@ddt.data('/foo/bar', '5.6.7.8:/foo/bar', '5.6.7.88:fake:/foo/bar')
def test_get_share_path_by_export_location(self, export_location):
result = self._helper.get_share_path_by_export_location(
dict(), export_location)
self.assertEqual('/foo/bar', result)
def test_disable_access_for_maintenance(self):
fake_maintenance_path = "fake.path"
share_mount_path = os.path.join(
self._helper.configuration.share_mount_path, self.share_name)
self.mock_object(self._helper, '_ssh_exec')
self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
self.mock_object(self._helper, '_get_maintenance_file_path',
mock.Mock(return_value=fake_maintenance_path))
self._helper.disable_access_for_maintenance(
self.server, self.share_name)
self._helper._ssh_exec.assert_any_call(
self.server,
['cat', const.NFS_EXPORTS_FILE,
'| grep', self.share_name,
'| sudo tee', fake_maintenance_path]
)
self._helper._ssh_exec.assert_any_call(
self.server,
['sudo', 'exportfs', '-u', share_mount_path]
)
self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with(
self.server
)
def test_restore_access_after_maintenance(self):
fake_maintenance_path = "fake.path"
self.mock_object(self._helper, '_get_maintenance_file_path',
mock.Mock(return_value=fake_maintenance_path))
self.mock_object(self._helper, '_ssh_exec')
self._helper.restore_access_after_maintenance(
self.server, self.share_name)
self._helper._ssh_exec.assert_called_once_with(
self.server,
['cat', fake_maintenance_path,
'| sudo tee -a', const.NFS_EXPORTS_FILE,
'&& sudo exportfs -r', '&& sudo rm -f',
fake_maintenance_path]
)
@ddt.ddt
class CIFSHelperTestCase(test.TestCase):
"""Test case for CIFS helper of generic driver."""
def setUp(self):
super(CIFSHelperTestCase, self).setUp()
self.server_details = {'instance_id': 'fake',
'public_address': '1.2.3.4', }
self.share_name = 'fake_share_name'
self.fake_conf = manila.share.configuration.Configuration(None)
self._ssh_exec = mock.Mock(return_value=('', ''))
self._execute = mock.Mock(return_value=('', ''))
self._helper = generic.CIFSHelper(self._execute, self._ssh_exec,
self.fake_conf)
self.access = dict(
access_level=const.ACCESS_LEVEL_RW,
access_type='ip',
access_to='1.1.1.1')
def test_init_helper(self):
self._helper.init_helper(self.server_details)
self._helper._ssh_exec.assert_called_once_with(
self.server_details,
['sudo', 'net', 'conf', 'list'],
)
def test_create_export_share_does_not_exist(self):
def fake_ssh_exec(*args, **kwargs):
if 'showshare' in args[1]:
raise exception.ProcessExecutionError()
else:
return ('', '')
self.mock_object(self._helper, '_ssh_exec',
mock.Mock(side_effect=fake_ssh_exec))
ret = self._helper.create_export(self.server_details, self.share_name)
expected_location = '\\\\%s\\%s' % (
self.server_details['public_address'], self.share_name)
self.assertEqual(ret, expected_location)
share_path = os.path.join(
self._helper.configuration.share_mount_path,
self.share_name)
self._helper._ssh_exec.assert_has_calls([
mock.call(
self.server_details,
['sudo', 'net', 'conf', 'showshare', self.share_name, ]
),
mock.call(
self.server_details,
[
'sudo', 'net', 'conf', 'addshare', self.share_name,
share_path, 'writeable=y', 'guest_ok=y',
]
),
mock.call(self.server_details, mock.ANY),
])
def test_create_export_share_exist_recreate_true(self):
ret = self._helper.create_export(self.server_details, self.share_name,
recreate=True)
expected_location = '\\\\%s\\%s' % (
self.server_details['public_address'], self.share_name)
self.assertEqual(ret, expected_location)
share_path = os.path.join(
self._helper.configuration.share_mount_path,
self.share_name)
self._helper._ssh_exec.assert_has_calls([
mock.call(
self.server_details,
['sudo', 'net', 'conf', 'showshare', self.share_name, ]
),
mock.call(
self.server_details,
['sudo', 'net', 'conf', 'delshare', self.share_name, ]
),
mock.call(
self.server_details,
[
'sudo', 'net', 'conf', 'addshare', self.share_name,
share_path, 'writeable=y', 'guest_ok=y',
]
),
mock.call(self.server_details, mock.ANY),
])
def test_create_export_share_exist_recreate_false(self):
self.assertRaises(
exception.ShareBackendException,
self._helper.create_export,
self.server_details,
self.share_name,
recreate=False,
)
self._helper._ssh_exec.assert_has_calls([
mock.call(
self.server_details,
['sudo', 'net', 'conf', 'showshare', self.share_name, ]
),
])
def test_remove_export(self):
self._helper.remove_export(self.server_details, self.share_name)
self._helper._ssh_exec.assert_called_once_with(
self.server_details,
['sudo', 'net', 'conf', 'delshare', self.share_name],
)
def test_remove_export_forcibly(self):
delshare_command = ['sudo', 'net', 'conf', 'delshare', self.share_name]
def fake_ssh_exec(*args, **kwargs):
if delshare_command == args[1]:
raise exception.ProcessExecutionError()
else:
return ('', '')
self.mock_object(self._helper, '_ssh_exec',
mock.Mock(side_effect=fake_ssh_exec))
self._helper.remove_export(self.server_details, self.share_name)
self._helper._ssh_exec.assert_has_calls([
mock.call(
self.server_details,
['sudo', 'net', 'conf', 'delshare', self.share_name],
),
mock.call(
self.server_details,
['sudo', 'smbcontrol', 'all', 'close-share', self.share_name],
),
])
def test_allow_access_ip_exist(self):
hosts = [self.access['access_to'], ]
self.mock_object(self._helper, '_get_allow_hosts',
mock.Mock(return_value=hosts))
self.mock_object(self._helper, '_set_allow_hosts')
self.assertRaises(
exception.ShareAccessExists,
self._helper.allow_access,
self.server_details,
self.share_name,
self.access['access_type'],
self.access['access_level'],
self.access['access_to'])
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_has_calls([])
def test_allow_access_ip_does_not_exist(self):
hosts = []
self.mock_object(self._helper, '_get_allow_hosts',
mock.Mock(return_value=hosts))
self.mock_object(self._helper, '_set_allow_hosts')
self._helper.allow_access(
self.server_details, self.share_name,
self.access['access_type'], self.access['access_level'],
self.access['access_to'])
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_called_once_with(
self.server_details, hosts, self.share_name)
def test_allow_access_wrong_type(self):
self.assertRaises(
exception.InvalidShareAccess,
self._helper.allow_access,
self.server_details,
self.share_name, 'fake', const.ACCESS_LEVEL_RW, '1.1.1.1')
@ddt.data(const.ACCESS_LEVEL_RO, 'fake')
def test_allow_access_wrong_access_level(self, data):
self.assertRaises(
exception.InvalidShareAccessLevel,
self._helper.allow_access,
self.server_details,
self.share_name, 'ip', data, '1.1.1.1')
@ddt.data(const.ACCESS_LEVEL_RO, 'fake')
def test_deny_access_unsupported_access_level(self, data):
access = dict(access_to='1.1.1.1', access_level=data)
self.mock_object(self._helper, '_get_allow_hosts')
self.mock_object(self._helper, '_set_allow_hosts')
self._helper.deny_access(self.server_details, self.share_name, access)
self.assertFalse(self._helper._get_allow_hosts.called)
self.assertFalse(self._helper._set_allow_hosts.called)
def test_deny_access_list_has_value(self):
hosts = [self.access['access_to'], ]
self.mock_object(self._helper, '_get_allow_hosts',
mock.Mock(return_value=hosts))
self.mock_object(self._helper, '_set_allow_hosts')
self._helper.deny_access(
self.server_details, self.share_name, self.access)
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_called_once_with(
self.server_details, [], self.share_name)
def test_deny_access_list_does_not_have_value(self):
hosts = []
self.mock_object(self._helper, '_get_allow_hosts',
mock.Mock(return_value=hosts))
self.mock_object(self._helper, '_set_allow_hosts')
self._helper.deny_access(
self.server_details, self.share_name, self.access)
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_has_calls([])
def test_deny_access_force(self):
self.mock_object(
self._helper,
'_get_allow_hosts',
mock.Mock(side_effect=exception.ProcessExecutionError()),
)
self.mock_object(self._helper, '_set_allow_hosts')
self._helper.deny_access(
self.server_details, self.share_name, self.access, force=True)
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_has_calls([])
def test_deny_access_not_force(self):
def raise_process_execution_error(*args, **kwargs):
raise exception.ProcessExecutionError()
self.mock_object(self._helper, '_get_allow_hosts',
mock.Mock(side_effect=raise_process_execution_error))
self.mock_object(self._helper, '_set_allow_hosts')
self.assertRaises(
exception.ProcessExecutionError,
self._helper.deny_access,
self.server_details, self.share_name, self.access)
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_has_calls([])
@ddt.data(
'', '1.2.3.4:/nfs/like/export', '/1.2.3.4/foo', '\\1.2.3.4\\foo',
'//1.2.3.4\\mixed_slashes_and_backslashes_one',
'\\\\1.2.3.4/mixed_slashes_and_backslashes_two')
def test__get_share_group_name_from_export_location(self, export_location):
self.assertRaises(
exception.InvalidShare,
self._helper._get_share_group_name_from_export_location,
export_location)
@ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo')
def test_get_exports_for_share(self, export_location):
server = dict(public_address='1.2.3.4')
self.mock_object(
self._helper, '_get_share_group_name_from_export_location',
mock.Mock(side_effect=(
self._helper._get_share_group_name_from_export_location)))
result = self._helper.get_exports_for_share(server, export_location)
expected_export_location = ['\\\\%s\\foo' % server['public_address']]
self.assertEqual(expected_export_location, result)
self._helper._get_share_group_name_from_export_location.\
assert_called_once_with(export_location)
@ddt.data(
{'public_address_with_suffix': 'foo'},
{'with_prefix_public_address': 'bar'},
{'with_prefix_public_address_and_with_suffix': 'quuz'}, {})
def test_get_exports_for_share_with_exception(self, server):
export_location = '1.2.3.4:/foo/bar'
self.assertRaises(
exception.ManilaException,
self._helper.get_exports_for_share, server, export_location)
@ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo')
def test_get_share_path_by_export_location(self, export_location):
fake_path = ' /bar/quuz\n '
fake_server = dict()
self.mock_object(
self._helper, '_ssh_exec',
mock.Mock(return_value=(fake_path, 'fake')))
self.mock_object(
self._helper, '_get_share_group_name_from_export_location',
mock.Mock(side_effect=(
self._helper._get_share_group_name_from_export_location)))
result = self._helper.get_share_path_by_export_location(
fake_server, export_location)
self.assertEqual('/bar/quuz', result)
self._helper._ssh_exec.assert_called_once_with(
fake_server, ['sudo', 'net', 'conf', 'getparm', 'foo', 'path'])
self._helper._get_share_group_name_from_export_location.\
assert_called_once_with(export_location)
def test_disable_access_for_maintenance(self):
allowed_hosts = ['test', 'test2']
maintenance_path = os.path.join(
self._helper.configuration.share_mount_path,
"%s.maintenance" % self.share_name)
self.mock_object(self._helper, '_set_allow_hosts')
self.mock_object(self._helper, '_get_allow_hosts',
mock.Mock(return_value=allowed_hosts))
self._helper.disable_access_for_maintenance(
self.server_details, self.share_name)
self._helper._get_allow_hosts.assert_called_once_with(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_called_once_with(
self.server_details, [], self.share_name)
valid_cmd = ['echo', "'test test2'", '| sudo tee', maintenance_path]
self._helper._ssh_exec.assert_called_once_with(
self.server_details, valid_cmd)
def test_restore_access_after_maintenance(self):
fake_maintenance_path = "test.path"
self.mock_object(self._helper, '_set_allow_hosts')
self.mock_object(self._helper, '_get_maintenance_file_path',
mock.Mock(return_value=fake_maintenance_path))
self.mock_object(self._helper, '_ssh_exec',
mock.Mock(side_effect=[("fake fake2", 0), "fake"]))
self._helper.restore_access_after_maintenance(
self.server_details, self.share_name)
self._helper._set_allow_hosts.assert_called_once_with(
self.server_details, ['fake', 'fake2'], self.share_name)
self._helper._ssh_exec.assert_any_call(
self.server_details, ['cat', fake_maintenance_path])
self._helper._ssh_exec.assert_any_call(
self.server_details, ['sudo rm -f', fake_maintenance_path])
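# --- Hedged illustration (not part of the original test module) ---
# A self-contained sketch of the deny-access behaviour asserted by the tests
# above: read the allow-hosts list, drop the denied host if it is present, and
# only then write the list back.  The helper here is a plain mock, not
# manila's real helper class.
def _deny_access_sketch():
    from unittest import mock

    helper = mock.Mock()
    helper._get_allow_hosts.return_value = ['1.1.1.1', '2.2.2.2']

    def deny_access(server, share_name, access_to):
        # Mirrors the flow checked in test_deny_access_list_has_value().
        hosts = list(helper._get_allow_hosts(server, share_name))
        if access_to in hosts:
            hosts.remove(access_to)
            helper._set_allow_hosts(server, hosts, share_name)

    deny_access({'instance_id': 'fake'}, 'fake_share', '1.1.1.1')
    helper._set_allow_hosts.assert_called_once_with(
        {'instance_id': 'fake'}, ['2.2.2.2'], 'fake_share')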
|
|
# -*- encoding: utf-8 -*-
__author__ = 'kotaimen'
__date__ = '07/11/2017'
from troposphere import Base64, FindInMap, GetAtt, Join, Select, Sub
from troposphere import ImportValue, Export
from troposphere import Condition, And, Equals, If, Not, Or
from troposphere import Template, Parameter, Ref, Tags, Output
from troposphere import AWS_ACCOUNT_ID, AWS_REGION, AWS_STACK_ID, \
AWS_STACK_NAME, AWS_NO_VALUE
from troposphere import Delete, Retain, Snapshot
from troposphere.policies import CreationPolicy, ResourceSignal, UpdatePolicy, \
AutoScalingReplacingUpdate, AutoScalingRollingUpdate
import troposphere.cloudformation as cloudformation
import troposphere.efs as efs
import troposphere.ec2 as ec2
import troposphere.iam as iam
import troposphere.rds as rds
from awacs.aws import Policy, Allow, Deny, Statement, Principal, Everybody
from awacs.aws import Condition, Bool, ArnEquals, StringEquals, IpAddress, Null
from awacs.aws import CurrentTime, EpochTime, MultiFactorAuthAge, Referer, \
SecureTransport, SourceArn, SourceIp, UserAgent
import awacs.sts
import awacs.cloudformation
import awacs.iam
import awacs.ec2
import awacs.logs
import cfnutil
#
# Template
#
t = Template()
t.add_version('2010-09-09')
t.add_description('Simple Elastic File System')
#
# Interface
#
parameter_groups = [
{
'Label': {'default': 'Network Configuration'},
'Parameters':
[
'VpcId',
'SubnetIds',
'NumberOfSubnets',
'SecurityGroup',
]
},
{
'Label': {'default': 'Filesystem Configuration'},
'Parameters':
[
'PerformanceMode',
]
},
{
'Label': {'default': 'Security Configuration'},
'Parameters':
[
'Encrypted',
'KmsKeyId',
]
},
]
t.add_metadata(
{
'AWS::CloudFormation::Interface': {
'ParameterGroups': parameter_groups,
'ParameterLabels':
dict(cfnutil.generate_parameter_labels(parameter_groups))
}
}
)
#
# Parameters
#
param_vpcid = t.add_parameter(Parameter(
'VpcId',
Description='VpcId of an existing VPC.',
Type='AWS::EC2::VPC::Id'
))
param_subnetids = t.add_parameter(Parameter(
'SubnetIds',
    Description='SubnetIds of existing subnets of the VPC where mount '
                'targets will be created. Note: for each file system, '
'you can create only one mount target per AZ.',
Type='List<AWS::EC2::Subnet::Id>',
))
param_num_of_subnets = t.add_parameter(Parameter(
'NumberOfSubnets',
Description='Number of subnets in SubnetIds parameter',
Type='Number',
Default=2,
MinValue=1,
MaxValue=6,
))
param_sg = t.add_parameter(Parameter(
'SecurityGroup',
    Description='Mount target security group id; a new security group will '
                'be created if this is left empty.',
Type='String',
Default='',
))
param_performance_mode = t.add_parameter(Parameter(
'PerformanceMode',
Description='Performance mode of the file system.',
Type='String',
Default='generalPurpose',
AllowedValues=['generalPurpose', 'maxIO']
))
param_encrypted = t.add_parameter(Parameter(
'Encrypted',
Description='Indicates whether the file system is encrypted.',
Default='false',
Type='String',
AllowedValues=['true', 'false'],
))
param_kms_key = t.add_parameter(Parameter(
'KmsKeyId',
    Description='The ARN of the KMS master key that is used to encrypt the '
                'file system. If you enable the Encrypted property but '
                'don\'t specify this property, this template uses the '
                'service default master key.',
Default='',
Type='String'
))
#
# Conditions
#
t.add_condition(
'TwoSubnetsCondition',
Or(
Equals(Ref(param_num_of_subnets), '2'),
Equals(Ref(param_num_of_subnets), '3'),
Equals(Ref(param_num_of_subnets), '4'),
Equals(Ref(param_num_of_subnets), '5'),
Equals(Ref(param_num_of_subnets), '6'),
)
)
t.add_condition(
'ThreeSubnetsCondition',
Or(
Equals(Ref(param_num_of_subnets), '3'),
Equals(Ref(param_num_of_subnets), '4'),
Equals(Ref(param_num_of_subnets), '5'),
Equals(Ref(param_num_of_subnets), '6'),
)
)
t.add_condition(
'FourSubnetsCondition',
Or(
Equals(Ref(param_num_of_subnets), '4'),
Equals(Ref(param_num_of_subnets), '5'),
Equals(Ref(param_num_of_subnets), '6'),
)
)
t.add_condition(
'FiveSubnetsCondition',
Or(
Equals(Ref(param_num_of_subnets), '5'),
Equals(Ref(param_num_of_subnets), '6'),
)
)
t.add_condition(
'SixSubnetsCondition',
Equals(Ref(param_num_of_subnets), '6'),
)
t.add_condition(
'StorageEncryptedCondition',
Equals(Ref(param_encrypted), 'true'),
)
t.add_condition(
'DefaultKmsCondition',
Equals(Ref(param_kms_key), '')
)
t.add_condition(
'CreateSecurityGroupCondition',
Equals(Ref(param_sg), '')
)
#
# Resources
#
file_system = t.add_resource(efs.FileSystem(
'FileSystem',
Encrypted=Ref(param_encrypted),
KmsKeyId=If('StorageEncryptedCondition',
If('DefaultKmsCondition',
Ref(AWS_NO_VALUE),
Ref(param_kms_key)),
Ref(AWS_NO_VALUE),
),
PerformanceMode=Ref(param_performance_mode),
))
efs_sg = t.add_resource(ec2.SecurityGroup(
'EfsSecurityGroup',
Condition='CreateSecurityGroupCondition',
VpcId=Ref(param_vpcid),
    GroupDescription='Enable NFS access to EFS mount targets',
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='2049',
ToPort='2049',
CidrIp='0.0.0.0/0',
),
],
))
mount_target_1 = t.add_resource(efs.MountTarget(
'MountTarget1',
FileSystemId=Ref(file_system),
SubnetId=Select(0, Ref(param_subnetids)),
SecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(efs_sg),
Ref(param_sg)
)]
))
mount_target_2 = t.add_resource(efs.MountTarget(
'MountTarget2',
Condition='TwoSubnetsCondition',
FileSystemId=Ref(file_system),
SubnetId=Select(1, Ref(param_subnetids)),
SecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(efs_sg),
Ref(param_sg)
)]
))
mount_target_3 = t.add_resource(efs.MountTarget(
'MountTarget3',
Condition='ThreeSubnetsCondition',
FileSystemId=Ref(file_system),
SubnetId=Select(2, Ref(param_subnetids)),
SecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(efs_sg),
Ref(param_sg)
)]
))
mount_target_4 = t.add_resource(efs.MountTarget(
'MountTarget4',
Condition='FourSubnetsCondition',
FileSystemId=Ref(file_system),
SubnetId=Select(3, Ref(param_subnetids)),
SecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(efs_sg),
Ref(param_sg)
)]
))
mount_target_5 = t.add_resource(efs.MountTarget(
'MountTarget5',
Condition='FiveSubnetsCondition',
FileSystemId=Ref(file_system),
SubnetId=Select(4, Ref(param_subnetids)),
SecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(efs_sg),
Ref(param_sg)
)]
))
mount_target_6 = t.add_resource(efs.MountTarget(
'MountTarget6',
Condition='SixSubnetsCondition',
FileSystemId=Ref(file_system),
SubnetId=Select(5, Ref(param_subnetids)),
SecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(efs_sg),
Ref(param_sg)
)]
))
#
# Output
#
t.add_output([
Output('MountPoint',
Description='EFS mount point',
Value=Sub('${FileSystem}.efs.${AWS::Region}.amazonaws.com'),
),
Output('ElasticFileSystem',
Description='ElasticFileSystem',
Value=Ref(file_system)
),
Output('MountTargetSecurityGroup',
Condition='CreateSecurityGroupCondition',
Description='MountTargetSecurityGroup',
Value=Ref(efs_sg)
),
Output('MountTarget1',
Description='MountTarget1',
Value=Ref(mount_target_1)
),
])
#
# Write template
#
cfnutil.write(t, __file__.replace('Template.py', '.template.yaml'),
write_yaml=True)
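# --- Hedged illustration (not part of the original template module) ---
# The Two/Three/.../SixSubnetsCondition blocks above all follow one pattern:
# "condition N is true when NumberOfSubnets is at least N".  The sketch below
# builds the same conditions in a loop; it reuses the Equals/Or/Ref imports at
# the top of this file and is an alternative construction, not what the
# original template code does.
def _add_subnet_conditions_sketch(template, num_subnets_param, max_subnets=6):
    words = {2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', 6: 'Six'}
    for n in range(2, max_subnets + 1):
        eqs = [Equals(Ref(num_subnets_param), str(k))
               for k in range(n, max_subnets + 1)]
        template.add_condition(
            '%sSubnetsCondition' % words[n],
            eqs[0] if len(eqs) == 1 else Or(*eqs))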
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class OptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name='global_step_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd_op.minimize(loss, global_step, [var0, var1])
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testAggregationMethod(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost,
global_step, [var0, var1],
aggregation_method=gradients_util.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testPrecomputedGradient(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
grad_loss = constant_op.constant([42, -42], dtype=dtype)
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost, global_step, [var0, var1], grad_loss=grad_loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
def loss():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, trainable=False, name='a')
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, trainable=False, name='b')
return 5 * var0 + var1
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
# pylint: disable=cell-var-from-loop
def loss():
return 5 * var0
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return constant_op.constant(5.0)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
resource_variable_ops.ResourceVariable(array_ops.zeros([2], dtype),
name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
self.evaluate(variables.global_variables_initializer())
# Run convert_ops to achieve the gradients converting
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd_op.apply_gradients(converted_grads_and_vars)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
x = ops.convert_to_tensor(1.0)
def f():
return x * x
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(f, [x])
self.assertEqual(1, len(grads_and_vars))
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd_op.apply_gradients(grads_and_vars)
@test_util.run_deprecated_v1
def testTrainOp(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
self.assertTrue(opt_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
@test_util.run_deprecated_v1
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with self.cached_session():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0., 0.], self.evaluate(var1))
if __name__ == '__main__':
test.main()
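# --- Hedged illustration (not part of the original test module) ---
# A minimal sketch of the update rule checked in testBasic above, written
# against the public tf.compat.v1 API (assumed available) rather than the
# internal modules imported by this test.  With cost = 5*var0 + 3*var1 and a
# learning rate of 3.0, one SGD step yields var0 -> [-14, -13] and
# var1 -> [-6, -5], matching the assertions above.
def _sgd_one_step_sketch():
    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()
    var0 = tf.compat.v1.Variable([1.0, 2.0])
    var1 = tf.compat.v1.Variable([3.0, 4.0])
    cost = 5 * var0 + 3 * var1
    sgd = tf.compat.v1.train.GradientDescentOptimizer(3.0)
    train_op = sgd.minimize(cost, var_list=[var0, var1])
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(train_op)
        return sess.run(var0), sess.run(var1)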
|
|
# encoding: utf-8
"""Magic functions for InteractiveShell.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
# Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
# Copyright (C) 2008 The IPython Development Team
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import os
import re
import sys
from getopt import getopt, GetoptError
from traitlets.config.configurable import Configurable
from IPython.core import oinspect
from IPython.core.error import UsageError
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from decorator import decorator
from IPython.utils.ipstruct import Struct
from IPython.utils.process import arg_split
from IPython.utils.text import dedent
from traitlets import Bool, Dict, Instance, observe
from logging import error
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# A dict we'll use for each class that has magics, used as temporary storage to
# pass information between the @line/cell_magic method decorators and the
# @magics_class class decorator, because the method decorators have no
# access to the class when they run. See for more details:
# http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
magics = dict(line={}, cell={})
magic_kinds = ('line', 'cell')
magic_spec = ('line', 'cell', 'line_cell')
magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
#-----------------------------------------------------------------------------
# Utility classes and functions
#-----------------------------------------------------------------------------
class Bunch: pass
def on_off(tag):
"""Return an ON/OFF string for a 1/0 input. Simple utility function."""
return ['OFF','ON'][tag]
def compress_dhist(dh):
"""Compress a directory history into a new one with at most 20 entries.
Return a new list made from the first and last 10 elements of dhist after
removal of duplicates.
"""
head, tail = dh[:-10], dh[-10:]
newhead = []
done = set()
for h in head:
if h in done:
continue
newhead.append(h)
done.add(h)
return newhead + tail
def needs_local_scope(func):
"""Decorator to mark magic functions which need to local scope to run."""
func.needs_local_scope = True
return func
#-----------------------------------------------------------------------------
# Class and method decorators for registering magics
#-----------------------------------------------------------------------------
def magics_class(cls):
"""Class decorator for all subclasses of the main Magics class.
Any class that subclasses Magics *must* also apply this decorator, to
ensure that all the methods that have been decorated as line/cell magics
get correctly registered in the class instance. This is necessary because
when method decorators run, the class does not exist yet, so they
temporarily store their information into a module global. Application of
this class decorator copies that global data to the class instance and
clears the global.
Obviously, this mechanism is not thread-safe, which means that the
*creation* of subclasses of Magic should only be done in a single-thread
context. Instantiation of the classes has no restrictions. Given that
these classes are typically created at IPython startup time and before user
application code becomes active, in practice this should not pose any
problems.
"""
cls.registered = True
cls.magics = dict(line = magics['line'],
cell = magics['cell'])
magics['line'] = {}
magics['cell'] = {}
return cls
def record_magic(dct, magic_kind, magic_name, func):
"""Utility function to store a function as a magic of a specific kind.
Parameters
----------
dct : dict
A dictionary with 'line' and 'cell' subdicts.
magic_kind : str
Kind of magic to be stored.
magic_name : str
Key to store the magic as.
func : function
Callable object to store.
"""
if magic_kind == 'line_cell':
dct['line'][magic_name] = dct['cell'][magic_name] = func
else:
dct[magic_kind][magic_name] = func
def validate_type(magic_kind):
"""Ensure that the given magic_kind is valid.
Check that the given magic_kind is one of the accepted spec types (stored
in the global `magic_spec`), raise ValueError otherwise.
"""
    if magic_kind not in magic_spec:
        raise ValueError('magic_kind must be one of %s, %s given' %
                         (magic_spec, magic_kind))
# The docstrings for the decorator below will be fairly similar for the two
# types (method and function), so we generate them here once and reuse the
# templates below.
_docstring_template = \
"""Decorate the given {0} as {1} magic.
The decorator can be used with or without arguments, as follows.
i) without arguments: it will create a {1} magic named as the {0} being
decorated::
@deco
def foo(...)
will create a {1} magic named `foo`.
ii) with one string argument: which will be used as the actual name of the
resulting magic::
@deco('bar')
def foo(...)
will create a {1} magic named `bar`.
To register a class magic use ``InteractiveShell.register_magics(class or instance)``.
"""
# These two are decorator factories. While they are conceptually very similar,
# there are enough differences in the details that it's simpler to have them
# written as completely standalone functions rather than trying to share code
# and make a single one with convoluted logic.
def _method_magic_marker(magic_kind):
"""Decorator factory for methods in Magics subclasses.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
retval = decorator(call, func)
record_magic(magics, magic_kind, name, name)
elif isinstance(arg, str):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
record_magic(magics, magic_kind, name, func.__name__)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
return magic_deco
def _function_magic_marker(magic_kind):
"""Decorator factory for standalone functions.
"""
validate_type(magic_kind)
# This is a closure to capture the magic_kind. We could also use a class,
# but it's overkill for just that one bit of state.
def magic_deco(arg):
call = lambda f, *a, **k: f(*a, **k)
# Find get_ipython() in the caller's namespace
caller = sys._getframe(1)
for ns in ['f_locals', 'f_globals', 'f_builtins']:
get_ipython = getattr(caller, ns).get('get_ipython')
if get_ipython is not None:
break
else:
raise NameError('Decorator can only run in context where '
'`get_ipython` exists')
ip = get_ipython()
if callable(arg):
# "Naked" decorator call (just @foo, no args)
func = arg
name = func.__name__
ip.register_magic_function(func, magic_kind, name)
retval = decorator(call, func)
elif isinstance(arg, str):
# Decorator called with arguments (@foo('bar'))
name = arg
def mark(func, *a, **kw):
ip.register_magic_function(func, magic_kind, name)
return decorator(call, func)
retval = mark
else:
raise TypeError("Decorator can only be called with "
"string or function")
return retval
# Ensure the resulting decorator has a usable docstring
ds = _docstring_template.format('function', magic_kind)
ds += dedent("""
Note: this decorator can only be used in a context where IPython is already
active, so that the `get_ipython()` call succeeds. You can therefore use
it in your startup files loaded after IPython initializes, but *not* in the
IPython configuration file itself, which is executed before IPython is
fully up and running. Any file located in the `startup` subdirectory of
your configuration profile will be OK in this sense.
""")
magic_deco.__doc__ = ds
return magic_deco
MAGIC_NO_VAR_EXPAND_ATTR = '_ipython_magic_no_var_expand'
def no_var_expand(magic_func):
"""Mark a magic function as not needing variable expansion
By default, IPython interprets `{a}` or `$a` in the line passed to magics
as variables that should be interpolated from the interactive namespace
before passing the line to the magic function.
This is not always desirable, e.g. when the magic executes Python code
(%timeit, %time, etc.).
Decorate magics with `@no_var_expand` to opt-out of variable expansion.
.. versionadded:: 7.3
"""
setattr(magic_func, MAGIC_NO_VAR_EXPAND_ATTR, True)
return magic_func
# Create the actual decorators for public use
# These three are used to decorate methods in class definitions
line_magic = _method_magic_marker('line')
cell_magic = _method_magic_marker('cell')
line_cell_magic = _method_magic_marker('line_cell')
# These three decorate standalone functions and perform the decoration
# immediately. They can only run where get_ipython() works
register_line_magic = _function_magic_marker('line')
register_cell_magic = _function_magic_marker('cell')
register_line_cell_magic = _function_magic_marker('line_cell')
#-----------------------------------------------------------------------------
# Core Magic classes
#-----------------------------------------------------------------------------
class MagicsManager(Configurable):
"""Object that handles all magic-related functionality for IPython.
"""
# Non-configurable class attributes
# A two-level dict, first keyed by magic type, then by magic function, and
# holding the actual callable object as value. This is the dict used for
# magic function dispatch
magics = Dict()
# A registry of the original objects that we've been given holding magics.
registry = Dict()
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
auto_magic = Bool(True, help=
"Automatically call line magics without requiring explicit % prefix"
).tag(config=True)
@observe('auto_magic')
def _auto_magic_changed(self, change):
self.shell.automagic = change['new']
_auto_status = [
'Automagic is OFF, % prefix IS needed for line magics.',
'Automagic is ON, % prefix IS NOT needed for line magics.']
user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True)
def __init__(self, shell=None, config=None, user_magics=None, **traits):
super(MagicsManager, self).__init__(shell=shell, config=config,
user_magics=user_magics, **traits)
self.magics = dict(line={}, cell={})
# Let's add the user_magics to the registry for uniformity, so *all*
# registered magic containers can be found there.
self.registry[user_magics.__class__.__name__] = user_magics
def auto_status(self):
"""Return descriptive string with automagic status."""
return self._auto_status[self.auto_magic]
def lsmagic(self):
"""Return a dict of currently available magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a list of names.
"""
return self.magics
def lsmagic_docs(self, brief=False, missing=''):
"""Return dict of documentation of magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a dict keyed by magic
name whose value is the function docstring. If a docstring is
unavailable, the value of `missing` is used instead.
If brief is True, only the first line of each docstring will be returned.
"""
docs = {}
for m_type in self.magics:
m_docs = {}
for m_name, m_func in self.magics[m_type].items():
if m_func.__doc__:
if brief:
m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
else:
m_docs[m_name] = m_func.__doc__.rstrip()
else:
m_docs[m_name] = missing
docs[m_type] = m_docs
return docs
def register(self, *magic_objects):
"""Register one or more instances of Magics.
Take one or more classes or instances of classes that subclass the main
`core.Magic` class, and register them with IPython to use the magic
functions they provide. The registration process will then ensure that
any methods that have decorated to provide line and/or cell magics will
be recognized with the `%x`/`%%x` syntax as a line/cell magic
respectively.
If classes are given, they will be instantiated with the default
constructor. If your classes need a custom constructor, you should
        instantiate them first and pass the instance.
The provided arguments can be an arbitrary mix of classes and instances.
Parameters
----------
magic_objects : one or more classes or instances
"""
# Start by validating them to ensure they have all had their magic
# methods registered at the instance level
for m in magic_objects:
            if not m.registered:
                raise ValueError("Class of magics %r was constructed without "
                                 "the @magics_class class decorator" % m)
if isinstance(m, type):
# If we're given an uninstantiated class
m = m(shell=self.shell)
# Now that we have an instance, we can register it and update the
# table of callables
self.registry[m.__class__.__name__] = m
for mtype in magic_kinds:
self.magics[mtype].update(m.magics[mtype])
def register_function(self, func, magic_kind='line', magic_name=None):
"""Expose a standalone function as magic function for IPython.
This will create an IPython magic (line, cell or both) from a
standalone function. The functions should have the following
signatures:
* For line magics: `def f(line)`
* For cell magics: `def f(line, cell)`
* For a function that does both: `def f(line, cell=None)`
In the latter case, the function will be called with `cell==None` when
invoked as `%f`, and with cell as a string when invoked as `%%f`.
Parameters
----------
func : callable
Function to be registered as a magic.
magic_kind : str
Kind of magic, one of 'line', 'cell' or 'line_cell'
magic_name : optional str
If given, the name the magic will have in the IPython namespace. By
default, the name of the function itself is used.
"""
# Create the new method in the user_magics and register it in the
# global table
validate_type(magic_kind)
magic_name = func.__name__ if magic_name is None else magic_name
setattr(self.user_magics, magic_name, func)
record_magic(self.magics, magic_kind, magic_name, func)
def register_alias(self, alias_name, magic_name, magic_kind='line', magic_params=None):
"""Register an alias to a magic function.
The alias is an instance of :class:`MagicAlias`, which holds the
name and kind of the magic it should call. Binding is done at
call time, so if the underlying magic function is changed the alias
will call the new function.
Parameters
----------
alias_name : str
The name of the magic to be registered.
magic_name : str
The name of an existing magic.
magic_kind : str
Kind of magic, one of 'line' or 'cell'
"""
# `validate_type` is too permissive, as it allows 'line_cell'
# which we do not handle.
        if magic_kind not in magic_kinds:
            raise ValueError('magic_kind must be one of %s, %s given' %
                             (magic_kinds, magic_kind))
alias = MagicAlias(self.shell, magic_name, magic_kind, magic_params)
setattr(self.user_magics, alias_name, alias)
record_magic(self.magics, magic_kind, alias_name, alias)
# Key base class that provides the central functionality for magics.
class Magics(Configurable):
"""Base class for implementing magic functions.
Shell functions which can be reached as %function_name. All magic
functions should accept a string, which they can parse for their own
needs. This can make some functions easier to type, eg `%cd ../`
vs. `%cd("../")`
Classes providing magic functions need to subclass this class, and they
MUST:
- Use the method decorators `@line_magic` and `@cell_magic` to decorate
individual methods as magic functions, AND
- Use the class decorator `@magics_class` to ensure that the magic
methods are properly registered at the instance level upon instance
initialization.
See :mod:`magic_functions` for examples of actual implementation classes.
"""
# Dict holding all command-line options for each magic.
options_table = None
# Dict for the mapping of magic names to methods, set by class decorator
magics = None
# Flag to check that the class decorator was properly applied
registered = False
# Instance of IPython shell
shell = None
def __init__(self, shell=None, **kwargs):
if not(self.__class__.registered):
raise ValueError('Magics subclass without registration - '
'did you forget to apply @magics_class?')
if shell is not None:
if hasattr(shell, 'configurables'):
shell.configurables.append(self)
if hasattr(shell, 'config'):
kwargs.setdefault('parent', shell)
self.shell = shell
self.options_table = {}
# The method decorators are run when the instance doesn't exist yet, so
# they can only record the names of the methods they are supposed to
# grab. Only now, that the instance exists, can we create the proper
# mapping to bound methods. So we read the info off the original names
# table and replace each method name by the actual bound method.
# But we mustn't clobber the *class* mapping, in case of multiple instances.
class_magics = self.magics
self.magics = {}
for mtype in magic_kinds:
tab = self.magics[mtype] = {}
cls_tab = class_magics[mtype]
for magic_name, meth_name in cls_tab.items():
if isinstance(meth_name, str):
# it's a method name, grab it
tab[magic_name] = getattr(self, meth_name)
else:
# it's the real thing
tab[magic_name] = meth_name
# Configurable **needs** to be initiated at the end or the config
# magics get screwed up.
super(Magics, self).__init__(**kwargs)
def arg_err(self,func):
"""Print docstring if incorrect arguments were passed"""
print('Error in arguments:')
print(oinspect.getdoc(func))
def format_latex(self, strng):
"""Format a string for latex inclusion."""
# Characters that need to be escaped for latex:
escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
# Magic command names as headers:
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
# Magic commands
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
# Paragraph continue
par_re = re.compile(r'\\$',re.MULTILINE)
# The "\n" symbol
newline_re = re.compile(r'\\n')
# Now build the string for output:
#strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
strng = par_re.sub(r'\\\\',strng)
strng = escape_re.sub(r'\\\1',strng)
strng = newline_re.sub(r'\\textbackslash{}n',strng)
return strng
def parse_options(self, arg_str, opt_str, *long_opts, **kw):
"""Parse options passed to an argument string.
The interface is similar to that of :func:`getopt.getopt`, but it
returns a :class:`~IPython.utils.struct.Struct` with the options as keys
and the stripped argument string still as a string.
arg_str is quoted as a true sys.argv vector by using shlex.split.
This allows us to easily expand variables, glob files, quote
arguments, etc.
Parameters
----------
arg_str : str
The arguments to parse.
opt_str : str
The options specification.
mode : str, default 'string'
If given as 'list', the argument string is returned as a list (split
on whitespace) instead of a string.
list_all : bool, default False
Put all option values in lists. Normally only options
appearing more than once are put in a list.
posix : bool, default True
Whether to split the input line in POSIX mode or not, as per the
conventions outlined in the :mod:`shlex` module from the standard
library.
"""
# inject default options at the beginning of the input line
caller = sys._getframe(1).f_code.co_name
arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
mode = kw.get('mode','string')
if mode not in ['string','list']:
raise ValueError('incorrect mode given: %s' % mode)
# Get options
list_all = kw.get('list_all',0)
posix = kw.get('posix', os.name == 'posix')
strict = kw.get('strict', True)
# Check if we have more than one argument to warrant extra processing:
odict = {} # Dictionary with options
args = arg_str.split()
if len(args) >= 1:
            # Only bother parsing for options when there is at least one
            # argument to inspect.
argv = arg_split(arg_str, posix, strict)
# Do regular option processing
try:
opts,args = getopt(argv, opt_str, long_opts)
except GetoptError as e:
raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
" ".join(long_opts)))
for o,a in opts:
if o.startswith('--'):
o = o[2:]
else:
o = o[1:]
try:
odict[o].append(a)
except AttributeError:
odict[o] = [odict[o],a]
except KeyError:
if list_all:
odict[o] = [a]
else:
odict[o] = a
# Prepare opts,args for return
opts = Struct(odict)
if mode == 'string':
args = ' '.join(args)
return opts,args
def default_option(self, fn, optstr):
"""Make an entry in the options_table for fn, with value optstr"""
if fn not in self.lsmagic():
error("%s is not a magic function" % fn)
self.options_table[fn] = optstr
class MagicAlias(object):
"""An alias to another magic function.
An alias is determined by its magic name and magic kind. Lookup
is done at call time, so if the underlying magic changes the alias
will call the new function.
Use the :meth:`MagicsManager.register_alias` method or the
`%alias_magic` magic function to create and register a new alias.
"""
def __init__(self, shell, magic_name, magic_kind, magic_params=None):
self.shell = shell
self.magic_name = magic_name
self.magic_params = magic_params
self.magic_kind = magic_kind
self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
self.__doc__ = "Alias for `%s`." % self.pretty_target
self._in_call = False
def __call__(self, *args, **kwargs):
"""Call the magic alias."""
fn = self.shell.find_magic(self.magic_name, self.magic_kind)
if fn is None:
raise UsageError("Magic `%s` not found." % self.pretty_target)
# Protect against infinite recursion.
if self._in_call:
raise UsageError("Infinite recursion detected; "
"magic aliases cannot call themselves.")
self._in_call = True
try:
if self.magic_params:
args_list = list(args)
args_list[0] = self.magic_params + " " + args[0]
args = tuple(args_list)
return fn(*args, **kwargs)
finally:
self._in_call = False
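# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of the registration pattern the docstrings above describe:
# @line_magic/@cell_magic record method names at class-definition time, and
# @magics_class copies them onto the class so Magics.__init__ can bind them.
# The class and magic names below are made up; actually exposing them requires
# a running shell, e.g. get_ipython().register_magics(ExampleMagics).
@magics_class
class ExampleMagics(Magics):

    @line_magic
    def hello(self, line):
        """%hello <name> -- trivial line magic, defined only as an example."""
        return 'hello ' + line.strip()

    @cell_magic
    def shout(self, line, cell):
        """%%shout -- trivial cell magic, defined only as an example."""
        return cell.upper()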
|
|
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.blackboard import setup, \
E_Compression
import quex.blackboard as blackboard
from quex.input.command_line.GetPot import GetPot
import quex.input.command_line.validation as validation
from quex.input.setup import SETUP_INFO, \
SetupParTypes, \
global_extension_db, \
global_character_type_db, \
command_line_args_defined, \
command_line_arg_position, \
E_Files
from quex.output.cpp.token_id_maker import parse_token_id_file
from quex.engine.misc.file_in import error_msg, \
verify_word_in_list, \
read_namespaced_name, \
read_integer, \
open_file_or_die
import quex.engine.codec_db.core as codec_db
from quex.engine.generator.languages.core import db as quex_core_engine_generator_languages_db
from quex.engine.generator.action_info import CodeFragment
from quex.DEFINITIONS import QUEX_VERSION
from StringIO import StringIO
from operator import itemgetter
class ManualTokenClassSetup:
"""Class to mimik as 'real' TokenTypeDescriptor as defined in
quex.input.files.token_type.py. Names and functions must remain
as they are for compatibility.
"""
def __init__(self, FileName, ClassName, NameSpace, ClassNameSafe, TokenIDType):
self.__file_name = FileName
self.class_name = ClassName
self.name_space = NameSpace
self.class_name_safe = ClassNameSafe
self.column_number_type = CodeFragment("size_t")
self.line_number_type = CodeFragment("size_t")
self.token_id_type = CodeFragment(TokenIDType)
def get_file_name(self):
return self.__file_name
def manually_written(self):
return True
def do(argv):
global setup
try:
idx = argv.index("--token-class-file")
if idx + 1 < len(argv): idx += 1
else: idx = None
except:
idx = None
if idx is not None:
extra_argv = __extract_extra_options_from_file(argv[idx])
if extra_argv is not None: argv.extend(extra_argv)
command_line = __interpret_command_line(argv)
if command_line is None:
return False
return __perform_setup(command_line, argv)
def __perform_setup(command_line, argv):
"""RETURN: True, if process needs to be started.
False, if job is done.
"""
global setup
# (*) Classes and their namespace
__setup_analyzer_class(setup)
__setup_token_class(setup)
__setup_token_id_prefix(setup)
__setup_lexeme_null(setup) # Requires 'token_class_name_space'
# (*) Output programming language
setup.language = setup.language.upper()
verify_word_in_list(setup.language,
quex_core_engine_generator_languages_db.keys(),
"Programming language '%s' is not supported." % setup.language)
setup.language_db = quex_core_engine_generator_languages_db[setup.language]
setup.extension_db = global_extension_db[setup.language]
# Is the output file naming scheme provided by the extension database
# (Validation must happen immediately)
if setup.extension_db.has_key(setup.output_file_naming_scheme) == False:
error_msg("File extension scheme '%s' is not provided for language '%s'.\n" \
% (setup.output_file_naming_scheme, setup.language) + \
"Available schemes are: %s." % repr(setup.extension_db.keys())[1:-1])
# Before file names can be prepared, determine the output directory
# If 'source packaging' is enabled and no output directory is specified
# then take the directory of the source packaging.
if setup.source_package_directory != "" and setup.output_directory == "":
setup.output_directory = setup.source_package_directory
if setup.buffer_codec in ["utf8", "utf16"]:
setup.buffer_codec_transformation_info = setup.buffer_codec + "-state-split"
elif setup.buffer_codec_file != "":
try:
setup.buffer_codec = os.path.splitext(os.path.basename(setup.buffer_codec_file))[0]
except:
error_msg("cannot interpret string following '--codec-file'")
setup.buffer_codec_transformation_info = codec_db.get_codec_transformation_info(FileName=setup.buffer_codec_file)
elif setup.buffer_codec != "unicode":
setup.buffer_codec_transformation_info = codec_db.get_codec_transformation_info(setup.buffer_codec)
if setup.buffer_codec != "unicode":
setup.buffer_element_size_irrelevant = True
# (*) Output files
if setup.language not in ["DOT"]:
prepare_file_names(setup)
if setup.buffer_byte_order == "<system>":
setup.buffer_byte_order = sys.byteorder
setup.byte_order_is_that_of_current_system_f = True
else:
setup.byte_order_is_that_of_current_system_f = False
if setup.buffer_element_size == "wchar_t":
error_msg("Since Quex version 0.53.5, 'wchar_t' can no longer be specified\n"
"with option '--buffer-element-size' or '-bes'. Please, specify\n"
"'--buffer-element-type wchar_t' or '--bet'.")
if setup.buffer_element_type == "wchar_t":
setup.converter_ucs_coding_name = "WCHAR_T"
make_numbers(setup)
# (*) Determine buffer element type and size (in bytes)
if setup.buffer_element_size == -1:
if global_character_type_db.has_key(setup.buffer_element_type):
setup.buffer_element_size = global_character_type_db[setup.buffer_element_type][3]
elif setup.buffer_element_type == "":
setup.buffer_element_size = 1
else:
        # The buffer element type is given but is not a known built-in type,
        # so Quex cannot determine its size on its own.
setup.buffer_element_size = -1
if setup.buffer_element_type == "":
if setup.buffer_element_size in [1, 2, 4]:
setup.buffer_element_type = {
1: "uint8_t", 2: "uint16_t", 4: "uint32_t",
}[setup.buffer_element_size]
elif setup.buffer_element_size == -1:
pass
else:
error_msg("Buffer element type cannot be determined for size '%i' which\n" \
% setup.buffer_element_size +
"has been specified by '-b' or '--buffer-element-size'.")
setup.converter_f = False
if setup.converter_iconv_f or setup.converter_icu_f:
setup.converter_f = True
    # The only case where no converter helper is required is where ASCII
    # (Unicode restricted to [0, FF]) is used.
setup.converter_helper_required_f = True
if setup.converter_f == False and setup.buffer_element_size == 1 and setup.buffer_codec == "unicode":
setup.converter_helper_required_f = False
validation.do(setup, command_line, argv)
if setup.converter_ucs_coding_name == "":
if global_character_type_db.has_key(setup.buffer_element_type):
if setup.buffer_byte_order == "little": index = 1
else: index = 2
setup.converter_ucs_coding_name = global_character_type_db[setup.buffer_element_type][index]
if setup.token_id_foreign_definition_file != "":
CommentDelimiterList = [["//", "\n"], ["/*", "*/"]]
# Regular expression to find '#include <something>' and extract the 'something'
# in a 'group'. Note that '(' ')' cause the storage of parts of the match.
IncludeRE = "#[ \t]*include[ \t]*[\"<]([^\">]+)[\">]"
#
parse_token_id_file(setup.token_id_foreign_definition_file,
setup.token_id_prefix,
CommentDelimiterList, IncludeRE)
if setup.token_id_prefix_plain != setup.token_id_prefix:
        # The 'plain', namespace-less token indices are also supported
parse_token_id_file(setup.token_id_foreign_definition_file,
setup.token_id_prefix_plain,
CommentDelimiterList, IncludeRE)
# (*) Compression Types
compression_type_list = []
for name, ctype in [("compression_template_f", E_Compression.TEMPLATE),
("compression_template_uniform_f", E_Compression.TEMPLATE_UNIFORM),
("compression_path_f", E_Compression.PATH),
("compression_path_uniform_f", E_Compression.PATH_UNIFORM)]:
if command_line_args_defined(command_line, name):
compression_type_list.append((command_line_arg_position(name), ctype))
compression_type_list.sort(key=itemgetter(0))
setup.compression_type_list = map(lambda x: x[1], compression_type_list)
# (*) return setup ___________________________________________________________________
return True
def __get_float(MemberName):
ValueStr = setup.__dict__[MemberName]
if type(ValueStr) == float: return ValueStr
try:
return float(ValueStr)
except:
option_name = repr(SETUP_INFO[MemberName][0])[1:-1]
error_msg("Cannot convert '%s' into an floating point number for '%s'" % (ValueStr, option_name))
def prepare_file_names(setup):
setup.output_file_stem = ""
if setup.analyzer_name_space != ["quex"]:
for name in setup.analyzer_name_space:
setup.output_file_stem += name + "_"
setup.output_file_stem += setup.analyzer_class_name
setup.output_code_file = __prepare_file_name("", E_Files.SOURCE)
setup.output_header_file = __prepare_file_name("", E_Files.HEADER)
setup.output_configuration_file = __prepare_file_name("-configuration", E_Files.HEADER)
setup.output_token_id_file = __prepare_file_name("-token_ids", E_Files.HEADER)
setup.output_token_class_file = __prepare_file_name("-token", E_Files.HEADER)
if setup.token_class_only_f == False:
setup.output_token_class_file_implementation = __prepare_file_name("-token", E_Files.HEADER_IMPLEMTATION)
else:
setup.output_token_class_file_implementation = __prepare_file_name("-token", E_Files.SOURCE)
if setup.buffer_codec == "utf8":
setup.output_buffer_codec_header = "quex/code_base/converter_helper/from-utf8"
setup.output_buffer_codec_header_i = "quex/code_base/converter_helper/from-utf8.i"
elif setup.buffer_codec == "utf16":
setup.output_buffer_codec_header = "quex/code_base/converter_helper/from-utf16"
setup.output_buffer_codec_header_i = "quex/code_base/converter_helper/from-utf16.i"
elif setup.buffer_codec == "utf32":
setup.output_buffer_codec_header = "quex/code_base/converter_helper/from-utf32"
setup.output_buffer_codec_header_i = "quex/code_base/converter_helper/from-utf32.i"
elif setup.buffer_codec != "unicode":
        # Note that the name may be set to 'None' if the conversion is utf8 or utf16.
        # See 'Internal engine character encoding'.
setup.output_buffer_codec_header = \
__prepare_file_name("-converter-%s" % setup.buffer_codec, E_Files.HEADER)
setup.output_buffer_codec_header_i = \
__prepare_file_name("-converter-%s" % setup.buffer_codec, E_Files.HEADER_IMPLEMTATION)
else:
setup.output_buffer_codec_header = "quex/code_base/converter_helper/from-unicode-buffer"
setup.output_buffer_codec_header_i = "quex/code_base/converter_helper/from-unicode-buffer.i"
def make_numbers(setup):
setup.compression_template_min_gain = __get_integer("compression_template_min_gain")
setup.buffer_limit_code = __get_integer("buffer_limit_code")
setup.path_limit_code = __get_integer("path_limit_code")
setup.token_id_counter_offset = __get_integer("token_id_counter_offset")
setup.token_queue_size = __get_integer("token_queue_size")
setup.token_queue_safety_border = __get_integer("token_queue_safety_border")
setup.buffer_element_size = __get_integer("buffer_element_size")
def __get_integer(MemberName):
ValueStr = setup.__dict__[MemberName]
if type(ValueStr) == int: return ValueStr
result = read_integer(StringIO(ValueStr))
if result is None:
option_name = repr(SETUP_INFO[MemberName][0])[1:-1]
error_msg("Cannot convert '%s' into an integer for '%s'.\n" % (ValueStr, option_name) + \
"Use prefix '0x' for hexadecimal numbers.\n" + \
" '0o' for octal numbers.\n" + \
" '0b' for binary numbers.\n" + \
" '0r' for roman numbers.\n" + \
" and no prefix for decimal numbers.")
return result
def __prepare_file_name(Suffix, ContentType):
global setup
assert ContentType in E_Files
    # Language + Extension Scheme + ContentType --> name of extension
ext = setup.extension_db[setup.output_file_naming_scheme][ContentType]
file_name = setup.output_file_stem + Suffix + ext
if setup.output_directory == "": return file_name
else: return os.path.normpath(setup.output_directory + "/" + file_name)
def __setup_analyzer_class(setup):
""" X0::X1::X2::ClassName --> analyzer_class_name = ClassName
analyzer_name_space = ["X0", "X1", "X2"]
::ClassName --> analyzer_class_name = ClassName
analyzer_name_space = []
ClassName --> analyzer_class_name = ClassName
analyzer_name_space = ["quex"]
"""
if setup.analyzer_class.find("::") == -1:
setup.analyzer_class = "quex::%s" % setup.analyzer_class
setup.analyzer_class_name, \
setup.analyzer_name_space, \
setup.analyzer_name_safe = \
read_namespaced_name(setup.analyzer_class,
"analyzer engine (options -o, --engine, --analyzer-class)")
if setup.show_name_spaces_f:
print "Analyzer: {"
print " class_name: %s;" % setup.analyzer_class_name
print " name_space: %s;" % repr(setup.analyzer_name_space)[1:-1]
print " name_prefix: %s;" % setup.analyzer_name_safe
print "}"
setup.analyzer_derived_class_name, \
setup.analyzer_derived_class_name_space, \
setup.analyzer_derived_class_name_safe = \
read_namespaced_name(setup.analyzer_derived_class_name,
"derived analyzer class (options --derived-class, --dc)",
AllowEmptyF=True)
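# --- Hedged illustration (not part of the original module) ---
# The namespace rule documented in __setup_analyzer_class() above, restated as
# a tiny standalone helper.  This is NOT quex's read_namespaced_name(); it only
# sketches the mapping: no '::' -> default namespace ["quex"], a leading '::'
# -> empty namespace, otherwise the '::'-separated prefix is the namespace.
def _split_namespaced_name_sketch(name, default_name_space=("quex",)):
    if "::" not in name:
        return name, list(default_name_space)
    parts = name.split("::")
    return parts[-1], [p for p in parts[:-1] if p != ""]
# _split_namespaced_name_sketch("X0::X1::X2::ClassName") == ("ClassName", ["X0", "X1", "X2"])
# _split_namespaced_name_sketch("::ClassName")           == ("ClassName", [])
# _split_namespaced_name_sketch("ClassName")             == ("ClassName", ["quex"])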
def __setup_lexeme_null(setup):
if len(setup.external_lexeme_null_object) != 0:
lexeme_null_object = setup.external_lexeme_null_object
default_name_space = setup.analyzer_name_space
elif setup.token_class_only_f:
lexeme_null_object = "LexemeNullObject"
default_name_space = setup.token_class_name_space
else:
lexeme_null_object = "LexemeNullObject"
default_name_space = setup.analyzer_name_space
if lexeme_null_object.find("::") == -1:
# By default, setup the token in the analyzer's namespace
if len(setup.analyzer_name_space) != 0:
name_space = reduce(lambda x, y: "%s::%s" % (x, y), default_name_space)
else:
name_space = ""
lexeme_null_object = "%s::%s" % (name_space, lexeme_null_object)
setup.lexeme_null_name, \
setup.lexeme_null_namespace, \
setup.lexeme_null_name_safe = \
read_namespaced_name(lexeme_null_object,
"lexeme null object (options --lexeme-null-object, --lno)")
setup.lexeme_null_full_name_cpp = "::"
for name in setup.lexeme_null_namespace:
setup.lexeme_null_full_name_cpp += name + "::"
setup.lexeme_null_full_name_cpp += setup.lexeme_null_name
def __setup_token_class(setup):
""" X0::X1::X2::ClassName --> token_class_name = ClassName
token_name_space = ["X0", "X1", "X2"]
::ClassName --> token_class_name = ClassName
token_name_space = []
ClassName --> token_class_name = ClassName
token_name_space = analyzer_name_space
"""
if setup.token_class.find("::") == -1:
# By default, setup the token in the analyzer's namespace
if len(setup.analyzer_name_space) != 0:
analyzer_name_space = reduce(lambda x, y: "%s::%s" % (x, y), setup.analyzer_name_space)
else:
analyzer_name_space = ""
setup.token_class = "%s::%s" % (analyzer_name_space, setup.token_class)
# Token classes and derived classes have the freedom not to open a namespace,
# thus no check 'if namespace == empty'.
setup.token_class_name, \
setup.token_class_name_space, \
setup.token_class_name_safe = \
read_namespaced_name(setup.token_class,
"token class (options --token-class, --tc)")
if setup.show_name_spaces_f:
print "Token: {"
print " class_name: %s;" % setup.token_class_name
print " name_space: %s;" % repr(setup.token_class_name_space)[1:-1]
print " name_prefix: %s;" % setup.token_class_name_safe
print "}"
if setup.token_class_file != "":
blackboard.token_type_definition = \
ManualTokenClassSetup(setup.token_class_file,
setup.token_class_name,
setup.token_class_name_space,
setup.token_class_name_safe,
setup.token_id_type)
#if len(setup.token_class_name_space) == 0:
# setup.token_class_name_space = deepcopy(setup.analyzer_name_space)
def __setup_token_id_prefix(setup):
setup.token_id_prefix_plain, \
setup.token_id_prefix_name_space, \
dummy = \
read_namespaced_name(setup.token_id_prefix,
"token prefix (options --token-id-prefix)")
if len(setup.token_id_prefix_name_space) != 0 and setup.language.upper() == "C":
error_msg("Token id prefix cannot contain a namespaces if '--language' is set to 'C'.")
def __extract_extra_options_from_file(FileName):
"""Extract an option section from a given file. The quex command line
options may be given in a section surrounded by '<<<QUEX-OPTIONS>>>'
markers. For example:
<<<QUEX-OPTIONS>>>
--token-class-file Common-token
--token-class Common::Token
--token-id-type uint32_t
--buffer-element-type uint8_t
--lexeme-null-object ::Common::LexemeNullObject
--foreign-token-id-file Common-token_ids
<<<QUEX-OPTIONS>>>
This function extracts those options and builds a new 'argv' array, i.e.
    an array of strings as if they came from the command line.
"""
MARKER = "<<<QUEX-OPTIONS>>>"
fh = open_file_or_die(FileName)
    while True:
line = fh.readline()
if line == "":
return None # Simply no starting marker has been found
elif line.find(MARKER) != -1:
pos = fh.tell()
break
result = []
    while True:
line = fh.readline()
if line == "":
fh.seek(pos)
error_msg("Missing terminating '%s'." % MARKER, fh)
if line.find(MARKER) != -1:
break
idx = line.find("-")
if idx == -1: continue
options = line[idx:].split()
result.extend(options)
if len(result) == 0: return None
if setup.message_on_extra_options_f:
if len(result) < 2: arg_str = result[0]
else: arg_str = reduce(lambda x, y: "%s %s" % (x.strip(), y.strip()), result)
print "## Command line options from file '%s'" % FileName
print "## %s" % arg_str
print "## (suppress this message with --no-message-on-extra-options)"
return result
def __interpret_command_line(argv):
command_line = GetPot(argv)
if command_line.search("--version", "-v"):
print "Quex - Fast Universal Lexical Analyzer Generator"
print "Version " + QUEX_VERSION
print "(C) 2005-2012 Frank-Rene Schaefer"
print "ABSOLUTELY NO WARRANTY"
return None
if command_line.search("--help", "-h"):
print "Quex - Fast Universal Lexical Analyzer Generator"
print "Please, consult the quex documentation for further help, or"
print "visit http://quex.org"
print "(C) 2005-2012 Frank-Rene Schaefer"
print "ABSOLUTELY NO WARRANTY"
return None
for variable_name, info in SETUP_INFO.items():
# Some parameters are not set on the command line. Their entry is not associated
# with a description list.
if type(info) != list: continue
if info[1] == SetupParTypes.FLAG:
setup.__dict__[variable_name] = command_line.search(info[0])
elif info[1] == SetupParTypes.NEGATED_FLAG:
setup.__dict__[variable_name] = not command_line.search(info[0])
elif info[1] == SetupParTypes.LIST:
if not command_line.search(info[0]):
setup.__dict__[variable_name] = []
else:
the_list = command_line.nominus_followers(info[0])
if len(the_list) == 0:
error_msg("Option %s\nnot followed by anything." % repr(info[0])[1:-1])
if setup.__dict__.has_key(variable_name):
for element in the_list:
                        if element not in setup.__dict__[variable_name]:
                            setup.__dict__[variable_name].append(element)
else:
setup.__dict__[variable_name] = list(set(the_list))
        else:
if not command_line.search(info[0]):
setup.__dict__[variable_name] = info[1]
else:
value = command_line.follow("--EMPTY--", info[0])
if value == "--EMPTY--":
error_msg("Option %s\nnot followed by anything." % repr(info[0])[1:-1])
setup.__dict__[variable_name] = value
return command_line
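# Illustrative sketch (assumption, not from the original source): for a SETUP_INFO
# entry along the lines of
#   SETUP_INFO["show_name_spaces_f"] = [["--show-name-spaces"], SetupParTypes.FLAG]
# the loop above would set setup.show_name_spaces_f to True exactly when the flag
# appears on the command line, and to False otherwise.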
|
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
from tensorflow.python.ops import nn
# TODO(ipolosukhin): Merge thirdparty DNN with this.
class DNNClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
"""A classifier for TensorFlow DNN models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
estimator = DNNClassifier(
feature_columns=[installed_emb, impression_emb],
hidden_units=[1024, 512, 256])
# Input builders
    def input_fn_train(): # returns X, Y
pass
estimator.train(input_fn_train)
    def input_fn_eval(): # returns X, Y
pass
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
      - if `feature_columns` is None, then `input` must contain only real
        valued `Tensor`.
Parameters:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. [64, 32] means first layer has 64 nodes and second one has
32.
feature_columns: An iterable containing all the feature columns used by the
model. All items in the set should be instances of classes derived from
`FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc.
n_classes: number of target classes. Default is binary classification.
It must be greater than 1.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If `None`,
will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
"""
def __init__(self,
hidden_units,
feature_columns=None,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu):
super(DNNClassifier, self).__init__(n_classes=n_classes,
weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn)
def _get_train_ops(self, features, targets):
"""See base class."""
if self._dnn_feature_columns is None:
self._dnn_feature_columns = layers.infer_real_valued_columns(features)
return super(DNNClassifier, self)._get_train_ops(features, targets)
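# Minimal usage sketch (assumption, mirroring the docstring example above; the
# data variables are placeholders):
#   classifier = DNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
#   classifier.fit(x_train, y_train)   # feature columns are inferred from x_train
#   predictions = classifier.predict(x_test)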
class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
"""A regressor for TensorFlow DNN models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
estimator = DNNRegressor(
feature_columns=[installed_emb, impression_emb],
hidden_units=[1024, 512, 256])
# Input builders
    def input_fn_train(): # returns X, Y
pass
estimator.train(input_fn_train)
    def input_fn_eval(): # returns X, Y
pass
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
      - if `feature_columns` is None, then `input` must contain only real
        valued `Tensor`.
Parameters:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. [64, 32] means first layer has 64 nodes and second one has
32.
feature_columns: An iterable containing all the feature columns used by the
model. All items in the set should be instances of classes derived from
`FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If `None`,
will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
"""
def __init__(self,
hidden_units,
feature_columns=None,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu):
super(DNNRegressor, self).__init__(weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn)
def _get_train_ops(self, features, targets):
"""See base class."""
if self._dnn_feature_columns is None:
self._dnn_feature_columns = layers.infer_real_valued_columns(features)
return super(DNNRegressor, self)._get_train_ops(features, targets)
# TODO(ipolosukhin): Deprecate this class in favor of DNNClassifier.
class TensorFlowDNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
"""TensorFlow DNN Classifier model.
Parameters:
hidden_units: List of hidden units per layer.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam", "Adagrad".
    learning_rate: If this is a constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
              learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are
supposed to have weight one.
continue_training: when continue_training is True, once initialized
      model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
dropout: When not None, the probability we will drop out a given coordinate.
"""
def __init__(self,
hidden_units,
n_classes,
batch_size=32,
steps=200,
optimizer='Adagrad',
learning_rate=0.1,
class_weight=None,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1,
dropout=None):
self.hidden_units = hidden_units
self.dropout = dropout
super(TensorFlowDNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
class_weight=class_weight,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, X, y):
return models.get_dnn_model(self.hidden_units,
models.logistic_regression,
dropout=self.dropout)(X, y)
@property
def weights_(self):
"""Returns weights of the DNN weight layers."""
return [self.get_tensor_value(w.name)
for w in self._graph.get_collection('dnn_weights')
] + [self.get_tensor_value('logistic_regression/weights')]
@property
def bias_(self):
"""Returns bias of the DNN's bias layers."""
return [self.get_tensor_value(b.name)
for b in self._graph.get_collection('dnn_biases')
] + [self.get_tensor_value('logistic_regression/bias')]
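# Minimal usage sketch (assumption, following the sklearn-style mixins above;
# accuracy_score and the data variables are placeholders):
#   clf = TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
#   clf.fit(x_train, y_train)
#   score = accuracy_score(y_test, clf.predict(x_test))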
class TensorFlowDNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
"""TensorFlow DNN Regressor model.
Parameters:
hidden_units: List of hidden units per layer.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam", "Adagrad".
    learning_rate: If this is a constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
              learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
continue_training: when continue_training is True, once initialized
      model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
dropout: When not None, the probability we will drop out a given coordinate.
"""
def __init__(self,
hidden_units,
n_classes=0,
batch_size=32,
steps=200,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1,
dropout=None):
self.hidden_units = hidden_units
self.dropout = dropout
super(TensorFlowDNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, X, y):
return models.get_dnn_model(self.hidden_units,
models.linear_regression,
dropout=self.dropout)(X, y)
@property
def weights_(self):
"""Returns weights of the DNN weight layers."""
return [self.get_tensor_value(w.name)
for w in self._graph.get_collection('dnn_weights')
] + [self.get_tensor_value('linear_regression/weights')]
@property
def bias_(self):
"""Returns bias of the DNN's bias layers."""
return [self.get_tensor_value(b.name)
for b in self._graph.get_collection('dnn_biases')
] + [self.get_tensor_value('linear_regression/bias')]
|
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T11ExplicitGet(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T11ExplicitGet, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.A_user = hydroshare.create_account(
'a_user@gmail.com',
username='A',
first_name='A First',
last_name='A Last',
superuser=False,
groups=[]
)
self.B_user = hydroshare.create_account(
'b_user@gmail.com',
username='B',
first_name='B First',
last_name='B Last',
superuser=False,
groups=[]
)
self.C_user = hydroshare.create_account(
'c_user@gmail.com',
username='C',
first_name='C First',
last_name='C Last',
superuser=False,
groups=[]
)
self.r1_resource = hydroshare.create_resource(
resource_type='GenericResource', owner=self.A_user, title='R1', metadata=[],)
self.r2_resource = hydroshare.create_resource(
resource_type='GenericResource', owner=self.A_user, title='R2', metadata=[],)
self.r3_resource = hydroshare.create_resource(
resource_type='GenericResource', owner=self.A_user, title='R3', metadata=[],)
self.A_group = self.A_user.uaccess\
.create_group(title='Test Group A',
description="This group is all about testing")
self.B_group = self.B_user.uaccess\
.create_group(title='Test Group B',
description="This group is all about testing")
self.C_group = self.C_user.uaccess\
.create_group(title='Test Group C',
description="This group is all about testing")
def test_01_user_level_access(self):
"Test all options for user-level access (the default)"
A_user = self.A_user
B_user = self.B_user
C_user = self.C_user
r1_resource = self.r1_resource
r2_resource = self.r2_resource
r3_resource = self.r3_resource
A_user.uaccess.share_resource_with_user(
r1_resource, C_user, PrivilegeCodes.OWNER)
A_user.uaccess.share_resource_with_user(
r2_resource, C_user, PrivilegeCodes.OWNER)
A_user.uaccess.share_resource_with_user(
r3_resource, C_user, PrivilegeCodes.OWNER)
foo = A_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(
is_equal_to_as_set(
foo, [r1_resource, r2_resource, r3_resource]))
foo = A_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(is_equal_to_as_set(foo, []))
foo = A_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(is_equal_to_as_set(foo, []))
foo = C_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(
is_equal_to_as_set(
foo, [r1_resource, r2_resource, r3_resource]))
foo = C_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(is_equal_to_as_set(foo, []))
foo = C_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(is_equal_to_as_set(foo, []))
A_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.OWNER)
A_user.uaccess.share_resource_with_user(
r2_resource, B_user, PrivilegeCodes.CHANGE)
A_user.uaccess.share_resource_with_user(
r3_resource, B_user, PrivilegeCodes.VIEW)
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(is_equal_to_as_set(foo, [r1_resource]))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(is_equal_to_as_set(foo, [r2_resource]))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(is_equal_to_as_set(foo, [r3_resource]))
# higher privileges are deleted when lower privileges are granted
C_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.VIEW)
C_user.uaccess.share_resource_with_user(
r2_resource, B_user, PrivilegeCodes.VIEW)
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(is_equal_to_as_set(foo, [])) # [r1_resource]
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(is_equal_to_as_set(foo, [])) # [r2_resource]
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
foo, [r1_resource, r2_resource, r3_resource]))
C_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.CHANGE)
C_user.uaccess.share_resource_with_user(
r2_resource, B_user, PrivilegeCodes.CHANGE)
C_user.uaccess.share_resource_with_user(
r3_resource, B_user, PrivilegeCodes.CHANGE)
# higher privilege gets deleted when a lower privilege is granted
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(is_equal_to_as_set(foo, [])) # [r1_resource]
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
foo, [r1_resource, r2_resource, r3_resource]))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(is_equal_to_as_set(foo, []))
# go from lower privilege to higher
C_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.VIEW)
C_user.uaccess.share_resource_with_user(
r2_resource, B_user, PrivilegeCodes.VIEW)
C_user.uaccess.share_resource_with_user(
r3_resource, B_user, PrivilegeCodes.VIEW)
A_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.CHANGE)
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(is_equal_to_as_set(foo, [r1_resource]))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(is_equal_to_as_set(foo, [r2_resource, r3_resource]))
A_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.OWNER)
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(is_equal_to_as_set(foo, [r1_resource]))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(is_equal_to_as_set(foo, [r2_resource, r3_resource]))
# go lower to higher
C_user.uaccess.share_resource_with_user(
r1_resource, B_user, PrivilegeCodes.VIEW)
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.OWNER)
self.assertTrue(is_equal_to_as_set(foo, []))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.CHANGE)
self.assertTrue(is_equal_to_as_set(foo, []))
foo = B_user.uaccess.get_resources_with_explicit_access(
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
foo, [r1_resource, r2_resource, r3_resource]))
def test_02_group_level_access(self):
A_user = self.A_user
B_user = self.B_user
A_group = self.A_group
B_group = self.B_group
r1_resource = self.r1_resource
r2_resource = self.r2_resource
r3_resource = self.r3_resource
# A owns everything
g = A_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, [r1_resource, r2_resource, r3_resource]))
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
A_user.uaccess.share_resource_with_group(r1_resource, A_group, PrivilegeCodes.CHANGE)
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
A_user.uaccess.share_group_with_user(A_group, B_user, PrivilegeCodes.CHANGE)
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
# no user owned resources
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=True,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
# mixed dominance relationships:
# in situations where there is a higher privilege,
# lower privilege should be eliminated, even if from a different source
B_user.uaccess.share_resource_with_group(r1_resource, B_group, PrivilegeCodes.CHANGE)
A_user.uaccess.share_resource_with_user(r1_resource, B_user, PrivilegeCodes.OWNER)
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=True)
# should be OWNER now
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
# OWNER squashes CHANGE
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=True,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
# OWNER squashes VIEW
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=True,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
def test_03_immutability(self):
A_user = self.A_user
B_user = self.B_user
C_user = self.C_user
B_group = self.B_group
r1_resource = self.r1_resource
r2_resource = self.r2_resource
r3_resource = self.r3_resource
# A owns everything
g = A_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, [r1_resource, r2_resource, r3_resource]))
# grant change access via user
A_user.uaccess.share_resource_with_user(r1_resource, B_user, PrivilegeCodes.CHANGE)
B_user.uaccess.share_resource_with_group(r1_resource, B_group, PrivilegeCodes.CHANGE)
B_user.uaccess.share_group_with_user(B_group, C_user, PrivilegeCodes.VIEW)
# B_user's CHANGE should be present
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
# C_user's CHANGE should not be present for user
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
# C_user's CHANGE should be present only for group
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
# now set immutable
r1_resource.raccess.immutable = True
r1_resource.raccess.save()
# immutable should squash CHANGE privilege to VIEW
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, []))
g = B_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=True,
via_group=False)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
# C_user's CHANGE should be squashed to VIEW
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
# owner squashes CHANGE + immutable
A_user.uaccess.share_resource_with_user(r1_resource, C_user, PrivilegeCodes.OWNER)
# when accounting for user privileges,
# C_user's OWNER should not be squashed to VIEW
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=True,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=True,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=True,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
# but not accounting for users should leave it alone
# C_user's OWNER should be ignored and squashed to VIEW
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, []))
g = C_user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW,
via_user=False,
via_group=True)
self.assertTrue(is_equal_to_as_set(g, [r1_resource]))
|
|
#!/usr/bin/env python
'''
tile-flatten-merge.py
This script will create a file for upload containing the flattened data in the files from
the mof_dir directory (the MOF files) but only for those objects included in the corresponding
id file in coaddid_dir.
Usage: python tile-flatten-merge.py [data_dir] [--out_dir OUT_DIR] [--mode {all,mof,sof}] [--save_all] [--vb]
Author: Nacho Sevilla (nsevilla@gmail.com) with some code from Erin Sheldon
Further edited by Spencer Everett to add some new features and make a less IO intensive version,
as we do not need intermediate flattened files for Balrog
Saw ~25% clock time improvement using `save_all=False`, as well as 60% less disk space
'''
import numpy as np
from astropy.io import fits
import fitsio
from astropy.table import Table, join
import os
import sys
from argparse import ArgumentParser
try:
# Python 2
xrange
except NameError:
# Python 3
xrange = range
parser = ArgumentParser()
parser.add_argument(
'data_dir',
type=str,
help='Directory where data is read from'
)
parser.add_argument(
'--extra_data_dir',
type=str,
default=None,
help='Sometimes an additional data directory is needed,'
' e.g. for extracted SE cat files that can\'t be saved'
' in the original data directory.'
)
parser.add_argument(
'--out_dir',
type=str,
default=None,
help='Directory where merged data is stored'
)
parser.add_argument(
'--save_all',
action='store_true',
default=False,
help='Set to save all flattened files'
)
parser.add_argument(
'--mode',
type=str,
default='all',
choices=['all', 'mof', 'sof'],
help='Can choose to include only one of MOF and SOF in merging & flattening'
)
parser.add_argument(
'--vb',
action='store_true',
default=False,
help='Set to print out more information'
)
printout = False
bands = ['g','r','i','z']
onedim_cols = ['id','number','NUMBER','fofid','fof_id','coadd_run','flags','time_last_fit',
'box_size','bdf_flags','obj_flags','mask_frac','masked_frac','psf_T','psfrec_T',
'cm_flags','cm_T','cm_T_err','cm_T_s2n','cm_weight','cm_max_flags','cm_max_T',
'cm_max_T_err','cm_max_T_s2n','cm_s2n_w','cm_chi2per','cm_dof','cm_flags_r',
'cm_s2n_r','cm_T_r','cm_psf_T_r','cm_fracdev','cm_fracdev_noclip','cm_fracdec_err',
'cm_TdByTe','cm_TdByTe_noclip','cm_mof_flags','cm_mof_num_itr','fofind','ra','dec',
'bdf_nfev','bdf_s2n','bdf_T','bdf_T_err','bdf_T_ratio','bdf_fracdev',
'bdf_fracdev_err','flagstr']
onedim_cols_addband = ['FLAGS','MAG_AUTO','X_IMAGE','Y_IMAGE','XWIN_IMAGE','YWIN_IMAGE',
'ERRAWIN_IMAGE','ERRBWIN_IMAGE','ERRTHETAWIN_IMAGE','ALPHAWIN_J2000',
'DELTAWIN_J2000','A_IMAGE','B_IMAGE','A_WORLD','B_WORLD','XMIN_IMAGE',
'XMAX_IMAGE','YMIN_IMAGE','YMAX_IMAGE','THETA_J2000','FLUX_RADIUS',
'IMAFLAGS_ISO','FLUX_AUTO','FLUXERR_AUTO','MAGERR_AUTO','KRON_RADIUS',
'BACKGROUND','THRESHOLD','FWHM_IMAGE','FWHM_WORLD']
bidim_cols = ['cm_pars_cov','cm_max_pars_cov','cm_g_cov','bdf_g_cov','bdf_pars_cov']
band_cols = ['nimage_tot','psf_flags','psf_flux','psf_flux_err','psf_flux_s2n','psf_flux_flags',
'psf_mag','nimage_use','cm_flux_cov','cm_flux','cm_flux_s2n','cm_mag','cm_logsb',
'cm_max_flux_cov','cm_max_flux','cm_max_flux_s2n','cm_max_mag','cm_max_logsb',
'bdf_flux','bdf_mag','bdf_flux_err']
bidim_band_cols = ['cm_flux_cov','cm_max_flux_cov','cm_pars_cov',
'cm_max_pars_cov','cm_g_cov','bdf_flux_cov']
multi_cols = ['psf_g','psfrec_g','cm_pars','cm_g','cm_max_pars', 'cm_mof_abs_diff',
'cm_mof_frac_diff','cm_mof_err_diff','bdf_pars','bdf_pars_err','bdf_g']
multi_cols_add = ['FLUX_APER','FLUXERR_APER','MAG_APER','MAGERR_APER']
def get_coldefs(descr, defs=None, bands=None, band_cols=None, bidim_cols=None):
"""
Convert a numpy descriptor to a set of oracle
column definitions
array columns are converted to name_{dim1}_{dim2}...{dimn}
parameters
----------
descr: numpy type descriptor
E.g. arr.dtype.descr
defs: dict,optional
A dict returning a list of field defs. It is keyed by field names from
the array. This can be used to over-ride the defaults, e.g. to use a
different name or to over-ride conversions for arrays.
"""
if defs is None:
defs={}
alldefs=[]
def_template='%s not null'
for d in descr:
name=d[0]
ot=get_oracle_type(d[1])
if name in defs:
alldefs += defs[name]
elif len(d) == 2:
# this is a scalar column... easy!
defi=def_template % ot
alldefs.append( (name,defi,d[1]) )
else:
dims=d[2]
if not isinstance(dims,tuple):
dims=(dims,)
if (bands is not None
and band_cols is not None
and name in band_cols):
names=get_band_arr_colnames(name,dims,bands,bidim_cols)
else:
names=get_arr_colnames(name,dims)
for n in names:
defi=def_template % (ot)
alldefs.append( (n,defi,d[1]) )
return alldefs
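# Illustrative sketch (not in the original script): for a descriptor entry like
# ('cm_flux', '>f8', (4,)) with bands ['g', 'r', 'i', 'z'], get_coldefs() expands
# the array column into per-band names 'cm_flux_g' ... 'cm_flux_z', each paired
# with the oracle definition 'binary_double not null'.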
def get_arr_colnames(name, dims):
"""
Get db names for an array, naming
name_{num1}_{num2}...
"""
ndim=len(dims)
if ndim==1:
names=get_arr1_colnames(name,dims)
elif ndim==2:
names=get_arr2_colnames(name,dims)
else:
raise ValueError("only support 1 and 2 d arrays")
return names
def get_arr1_colnames(name, dims):
"""
Get db names for an array, naming
name_{num}
"""
names=[]
for n in xrange(1,dims[0]+1):
names.append( '%s_%d' % (name,n) )
return names
def get_arr2_colnames(name, dims):
"""
Get db names for an array, naming
name_{num1}_{num2}
"""
names=[]
for n1 in xrange(1,dims[0]+1):
for n2 in xrange(1,dims[1]+1):
names.append( '%s_%d_%d' % (name,n1,n2) )
return names
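# Example sketch (not in the original script):
#   get_arr2_colnames('cov', (2, 2)) --> ['cov_1_1', 'cov_1_2', 'cov_2_1', 'cov_2_2']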
def get_band_arr_colnames(name, dims, bands, bidim_cols):
"""
Get db names for an array, naming
name_{num1}_{num2}...
"""
ndim=len(dims)
if ndim==1 and (name not in bidim_cols):
names=get_band_arr1_colnames(name,dims,bands)
elif ndim==1 and (name in bidim_cols):
        names=get_band_arr2_colnames(name,(int(np.sqrt(dims[0])),int(np.sqrt(dims[0]))),bands)
elif ndim==2:
names=get_band_arr2_colnames(name,dims,bands)
else:
raise ValueError("only support 1 and 2 d arrays")
return names
def get_band_arr1_colnames(name, dims, bands):
"""
Get db names for an array, naming
name_{num}
"""
names=[]
for i in xrange(dims[0]):
n=bands[i]
names.append( '%s_%s' % (name,n) )
return names
def get_band_arr2_colnames(name, dims, bands):
"""
Get db names for an array, naming
name_{num1}_{num2}
"""
names=[]
for i1 in xrange(dims[0]):
n1=bands[i1]
for i2 in xrange(dims[1]):
n2=bands[i2]
names.append( '%s_%s_%s' % (name,n1,n2) )
return names
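# Example sketch (not in the original script):
#   get_band_arr2_colnames('cm_flux_cov', (2, 2), ['g', 'r'])
#   --> ['cm_flux_cov_g_g', 'cm_flux_cov_g_r', 'cm_flux_cov_r_g', 'cm_flux_cov_r_r']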
def get_oracle_type(nt):
if 'f4' in nt:
ot='binary_float'
elif 'f8' in nt:
ot='binary_double'
elif 'i1' in nt or 'u1' in nt:
ot='number(3)'
elif 'i2' in nt or 'u2' in nt:
ot='number(5)'
elif 'i4' in nt:
ot='number(10)'
elif 'i8' in nt:
ot='number(19)'
elif 'u8' in nt:
ot='number(20)'
elif 'S' in nt:
slen=nt[2:]
ot='varchar2(%s)' % slen
else:
raise ValueError("unsupported numpy type: '%s'" % nt)
return ot
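# Example sketch (not in the original script): get_oracle_type('>f4') returns
# 'binary_float', get_oracle_type('>i8') returns 'number(19)', and
# get_oracle_type('|S32') returns 'varchar2(32)'.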
def get_fits_type(name):
if name == "id":
format = 'K'
elif name.lower() == "number":
format = 'J'
elif 'nimage_tot' in name:
format = 'J'
elif name == "fofid":
format = 'K'
elif name == "fof_id":
format = 'K'
elif name == "ra":
format = 'D'
elif name == "dec":
format = 'D'
elif name == "XMIN_IMAGE":
format = 'J'
elif name == "YMIN_IMAGE":
format = 'J'
elif name == "XMAX_IMAGE":
format = 'J'
elif name == "YMAX_IMAGE":
format = 'J'
elif name == "X_IMAGE":
format = 'E'
elif name == "Y_IMAGE":
format = 'E'
elif name == "XWIN_IMAGE":
format = 'D'
elif name == "YWIN_IMAGE":
format = 'D'
elif name == "ERRAWIN_IMAGE":
format = 'E'
elif name == "ERRBWIN_IMAGE":
format = 'E'
elif name == "ERRTHETAWIN_IMAGE":
format = 'E'
elif name == "ALPHAWIN_J2000":
format = 'D'
elif name == "DELTAWIN_J2000":
format = 'D'
elif name == "A_IMAGE":
format = 'E'
elif name == "B_IMAGE":
format = 'E'
elif name == "THETA_J2000":
format = 'E'
elif "WORLD" in name:
format = 'E'
elif "RADIUS" in name:
format = 'E'
elif "APER" in name:
format = 'E'
elif "AUTO" in name:
format = 'E'
elif "FWHM" in name:
format = 'E'
elif name == "THRESHOLD":
format = 'E'
elif name == "BACKGROUND":
format = 'E'
elif name == 'flagstr':
format = 'A32'
elif "flags" in name.lower():
format = 'J'
elif name == "time_last_fit":
format = 'D'
elif name == "box_size":
format = 'I'
elif "s2n" in name:
format = 'D'
elif "_T" in name:
format = 'D'
elif "_mag" in name:
format = 'D'
elif "nimage_use" in name:
format = 'J'
elif "frac" in name:
format = 'D'
elif "_g" in name:
format = 'D'
elif "pars" in name:
format = 'D'
elif "_flux" in name:
format = 'D'
elif "gapflux" in name:
format = 'D'
elif "_logsb" in name:
format = 'D'
elif "fracdev" in name:
format = 'D'
elif name == "cm_weight":
format = 'D'
elif name == "cm_chi2per":
format = 'D'
elif name == "cm_dof":
format = 'D'
elif name == "cm_TdByTe":
format = 'E'
elif name == "cm_mof_num_itr":
format = 'J'
elif "cm_mof_abs_diff" in name:
format = 'D'
elif "cm_mof_err_diff" in name:
format = 'D'
elif name == "bdf_nfev":
format = 'J'
elif name == 'fofind':
format = 'K'
else:
print('{} not found'.format(name))
sys.exit(1)
return format
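# Example sketch (not in the original script): get_fits_type('id') returns 'K',
# get_fits_type('psf_flags_g') returns 'J' (via the "flags" rule), and
# get_fits_type('cm_mag_i') returns 'D' (via the "_mag" rule).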
def check_name(name,cols,prev,strip,printout=False):
check = False
n = 0
colname = prev
for enum,col in enumerate(cols):
if printout:
print(col,name[0:len(name)-strip])
if col == name[0:len(name)-strip]:
check = True
colname = col
break
return (check,colname)
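# Example sketch (not in the original script): check_name('cm_flux_g', band_cols, '', 2)
# strips the trailing '_g' and returns (True, 'cm_flux'), flagging the name as a
# per-band column.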
def merge(args, tilename, filelist):
data_dir = args.data_dir
out_dir = args.out_dir
save_all = args.save_all
Nfiles = len(filelist)
#--------------------------------------------------------------------------------
# Nacho's original code: The below works if all files are saved out individually,
# but this is pretty IO intensive and Balrog doesn't need the intermediate files
if save_all is True:
flatten(data_dir, filelist, out_dir, tilename, save_all=True)
for f, fname in enumerate(filelist):
basename = os.path.basename(fname)
flatname = os.path.splitext(basename)[0]+'_flat.fits'
print('Merging {}'.format(flatname))
if f == 0:
merged = Table.read(os.path.join(out_dir, flatname), format='fits')
else:
t = Table.read(os.path.join(out_dir, flatname), format='fits')
# merged = join(t,merged,keys='NUMBER') # Original
# There are multiple cols that are identical
# (NUMBER, RA, DEC, etc.), so I think it is best to use all
merged = join(t, merged)
#--------------------------------------------------------------------------------
# Can make merged table directly instead
else:
merged = flatten(data_dir, filelist, out_dir, tilename, save_all=False)
if tilename == '':
newfile = os.path.join(out_dir, 'merged.fits')
merged.write(newfile, format='fits', overwrite=True)
print('Wrote {}'.format(os.path.join(out_dir, 'merged.fits')))
else:
newfile = os.path.join(out_dir, tilename+'_merged.fits')
merged.write(newfile, format='fits', overwrite=True)
print('Wrote {}'.format(os.path.join(out_dir, tilename+'_merged.fits')))
return
def flatten(data_dir, filelist, out_dir, tilename, save_all=False):
for fid, data_file in enumerate(filelist):
print('Flattening {}'.format(os.path.basename(data_file)))
# data_hdu = fits.open(os.path.join(data_dir,data_file))
# data_hdu = fits.open(data_file)
# data_tab = data_hdu[1].data
#fitsio version:
data_tab = fitsio.read(data_file)
defs = {}
descr = data_tab.view(np.ndarray).dtype.descr
alldefs = get_coldefs(descr,defs,bands,band_cols,bidim_band_cols)
names = [d[0] for d in alldefs]
formats = [d[2] for d in alldefs]
cols = []
prev_colname = ''
prev_colname_add = ''
prev_colname_band = ''
prev_colname_multi = ''
prev_colname_multi_add = ''
prev_colname_bidim = ''
prev_colname_bidim_band = ''
k = 0
m = 0
mofsofstr = ''
if '-mof' in data_file:
mofsofstr = 'MOF_'
elif '-sof' in data_file:
mofsofstr = 'SOF_'
else:
mofsofstr = ''
for i in xrange(len(names)):
            nm = names[i].split('_')[-1]
strip = len(nm)
if printout:
print('Checking {}, {}'.format(names[i],strip))
check_cols,colname = check_name(names[i],onedim_cols,prev_colname,0)
check_band_cols,colname_band = check_name(names[i],band_cols,prev_colname_band,strip+1)
check_bidim_cols,colname_bidim = check_name(names[i],bidim_cols,prev_colname_bidim,strip+1)
check_bidim_band_cols,colname_bidim_band = check_name(names[i],bidim_band_cols,prev_colname_bidim_band,4)
check_multi_cols,colname_multi = check_name(names[i],multi_cols,prev_colname_multi,strip+1)
check_cols_add,colname_add = check_name(names[i],onedim_cols_addband,prev_colname_add,0)
check_multi_cols_add,colname_multi_add = check_name(names[i],multi_cols_add,prev_colname_multi_add,strip+1)
if printout:
print(check_cols,check_band_cols,check_bidim_cols,check_bidim_band_cols,check_multi_cols,check_cols_add,check_multi_cols_add)
if i == 0:
n = 0
m = 0
if i > 0 and (prev_colname != colname or
colname_band != prev_colname_band or
colname_bidim != prev_colname_bidim or
colname_bidim_band != prev_colname_bidim_band or
colname_multi != prev_colname_multi):
n = 0
m = 0
k = k+1
if check_band_cols == True:
cols.append(fits.Column(name=mofsofstr+names[i].upper(),format=get_fits_type(names[i]),array=data_tab[colname_band][:,n]))
n = n + 1
prev_colname_band = colname_band
elif check_bidim_cols == True:
cols.append(fits.Column(name=mofsofstr+names[i].upper(),format=get_fits_type(names[i]),array=data_tab[colname_bidim][:,n,m]))
if n == len(data_tab[colname_bidim][0])-1:
n = 0
m = m + 1
else:
n = n + 1
prev_colname_bidim = colname_bidim
elif check_bidim_band_cols == True:
cols.append(fits.Column(name=mofsofstr+names[i].upper(),format=get_fits_type(names[i]),array=data_tab[colname_bidim_band][:,n,m]))
if n == len(data_tab[colname_bidim_band][0])-1:
n = 0
m = m + 1
else:
n = n + 1
prev_colname_bidim_band = colname_bidim_band
elif check_multi_cols == True:
cols.append(fits.Column(name=mofsofstr+names[i].upper(),format=get_fits_type(names[i]),array=data_tab[colname_multi][:,n]))
if n == len(data_tab[colname_multi][0])-1:
n = 0
else:
n = n + 1
prev_colname_multi = colname_multi
elif check_multi_cols_add == True:
idx = data_file.find('_cat')
fileband = data_file[idx-1:idx]
#print data_tab[colname_multi_add][:,n],n
#print names[i]+'_'+fileband.upper(),colname_multi_add
cols.append(fits.Column(name=mofsofstr+names[i].upper()+'_'+fileband.upper(),format=get_fits_type(names[i]),array=data_tab[colname_multi_add][:,n]))
if n == len(data_tab[colname_multi_add][0])-1:
n = 0
else:
n = n + 1
prev_colname_multi_add = colname_multi_add
elif check_cols_add == True:
idx = data_file.find('_cat')
fileband = data_file[idx-1:idx]
newname = names[i].upper()+'_'+fileband.upper()
cols.append(fits.Column(name=mofsofstr+newname,format=get_fits_type(names[i]),array=data_tab[names[i]]))
prev_colname_add = colname_add
elif 'gap' in names[i]:
continue
else:
if names[i] == "id":
newname = "COADD_OBJECT_ID"
elif names[i] == "number":
newname = "NUMBER"
elif names[i] == "ra":
newname = "RA"
elif names[i] == "dec":
newname = "DEC"
else:
newname = mofsofstr+names[i].upper()
cols.append(fits.Column(name=newname,format=get_fits_type(names[i]),array=data_tab[names[i]]))
prev_colname = colname
if "-mof" in data_file or "-sof" in data_file:
for b,band in enumerate(bands):
psf_mag_err = 1.0857362*data_tab['psf_flux_err'][:,b]/data_tab['psf_flux'][:,b]
psf_mag_err[psf_mag_err == 1.0857362] = -9999
cm_mag_err = 1.0857362*np.sqrt(data_tab['cm_flux_cov'][:,b,b])/data_tab['cm_flux'][:,b]
cm_mag_err[cm_mag_err == 1.0857362] = -9999
cm_mag_err = [-9999 if np.isnan(x) else x for x in cm_mag_err]
cols.append(fits.Column(name=mofsofstr+'psf_mag_err_'+band,format='D',array=psf_mag_err))
cols.append(fits.Column(name=mofsofstr+'cm_mag_err_'+band,format='D',array=cm_mag_err))
if fid == 0 and tilename != '':
cols.append(fits.Column(name='TILENAME',format='27A',array=data_tab.size*[tilename]))
new_hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
new_tbdata = new_hdu.data
# Original Nacho code
if save_all is True:
new_hdu = fits.BinTableHDU(data=new_tbdata)
basename = os.path.basename(data_file)
out_file = os.path.join(out_dir,os.path.splitext(basename)[0]+'_flat.fits')
            new_hdu.writeto(out_file, clobber=True)
print('Wrote {}'.format(out_file))
# Not saving individual files, less IO intensive:
else:
if fid == 0:
merged = Table(new_tbdata)
else:
merged = join(Table(new_tbdata), merged)
print('Merged {}'.format(os.path.basename(data_file)))
if save_all is False:
return merged
def get_files(data_dir):
if not os.path.isdir(data_dir):
print('Path with data not found at {}'.format(data_dir))
sys.exit(1)
return [os.path.join(data_dir, f) for f in os.listdir(data_dir)
if os.path.isfile(os.path.join(data_dir, f))]
def main():
# Parse command line
args = parser.parse_args()
if args.out_dir is None:
args.out_dir = args.data_dir
if not os.path.isdir(args.out_dir):
os.mkdir(args.out_dir)
print('Reading data from {}'.format(args.data_dir))
print('Writing files to upload to {}'.format(args.out_dir))
# Sometimes we will run on outputs that only run one of MOF or SOF
mode = args.mode
print('Running in mode {}'.format(mode))
check_string = ['g_cat','r_cat','i_cat','z_cat']
if mode == 'all':
check_string.append('-mof')
check_string.append('-sof')
elif mode == 'mof':
check_string.append('-mof')
elif mode == 'sof':
check_string.append('-sof')
print('Getting list of files...')
data_files = get_files(args.data_dir)
if args.extra_data_dir is not None:
data_files += get_files(args.extra_data_dir)
chk = 0
select_files = []
tilename = ''
for check in check_string:
for n, data_file in enumerate(data_files):
if tilename == '':
idx = data_file.find('DES')
tilename = data_file[idx:idx+12]
if check in data_file:
if '_flat' in data_file:
continue
chk += 1
select_files.append(data_file)
print('Appending {}'.format(os.path.basename(data_file)))
break
if n == (len(data_files)-1):
raise OSError('Cannot find file with {} for tile {}'.format(check, tilename))
# New setup flattens during merge
merge(args, tilename, select_files)
if __name__ == '__main__':
sys.exit(main())
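# Hypothetical invocation (paths are placeholders, not from the original script):
#   python tile-flatten-merge.py /data/DES0347-5540 --out_dir /data/merged --mode sof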
|
|
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
# This module should be kept compatible with Python 2.3
import os, re
from setuptools import Command
from distutils.errors import *
from distutils import log
from setuptools.command.sdist import sdist
from distutils.util import convert_path
from distutils.filelist import FileList
from pkg_resources import parse_requirements, safe_name, parse_version, \
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename
from sdist import walk_revctrl
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
from setopt import edit_config
edit_config(
filename,
{'egg_info':
{'tag_svn_revision':0, 'tag_date': 0, 'tag_build': self.tags()}
}
)
def finalize_options (self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
try:
list(
parse_requirements('%s==%s' % (self.egg_name,self.egg_version))
)
except ValueError:
raise DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name,self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('',os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name)+'.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name: self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key==self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
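    # Illustrative sketch (not in the original module): an egg_info.writers hook can
    # call cmd.write_or_delete_file("requirements", filename, data) to create the
    # file when data is non-empty, warn about an orphaned file when data is None,
    # or delete it when data is an empty string.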
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
return safe_version(self.distribution.get_version() + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
writer = ep.load(installer=installer)
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version+=self.tag_build
if self.tag_svn_revision and (
os.path.exists('.svn') or os.path.exists('PKG-INFO')
): version += '-r%s' % self.get_svn_revision()
if self.tag_date:
import time; version += time.strftime("-%Y%m%d")
return version
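    # Example sketch (not in the original module): with tag_build='.dev' and
    # tag_date set, tags() yields something like '.dev-20050528', which
    # tagged_version() appends to the distribution's version string.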
def get_svn_revision(self):
revision = 0
urlre = re.compile('url="([^"]+)"')
revre = re.compile('committed-rev="(\d+)"')
for base,dirs,files in os.walk(os.curdir):
if '.svn' not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove('.svn')
f = open(os.path.join(base,'.svn','entries'))
data = f.read()
f.close()
if data.startswith('<?xml'):
dirurl = urlre.search(data).group(1) # get repository URL
localrev = max([int(m.group(1)) for m in revre.finditer(data)]+[0])
else:
try: svnver = int(data.splitlines()[0])
except: svnver=-1
                if svnver < 8:
log.warn("unrecognized .svn/entries format; skipping %s", base)
dirs[:] = []
continue
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8' or '9'
dirurl = data[0][3]
localrev = max([int(d[9]) for d in data if len(d)>9 and d[9]]+[0])
if base==os.curdir:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return str(revision or get_pkg_info_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info,"SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name+'.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-"*78+'\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n'+'-'*78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if os.path.exists(path):
self.files.append(path)
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options (self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
files = self.filelist.files
if os.sep!='/':
files = [f.replace(os.sep,'/') for f in files]
self.execute(write_file, (self.manifest, files),
"writing manifest file '%s'" % self.manifest)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def prune_file_list (self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1)
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "wb") # always write POSIX-style manifest
f.write("\n".join(contents))
f.close()
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution,'zip_safe',None)
import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = ['\n'.join(yield_lines(dist.install_requires or ()))]
for extra,reqs in (dist.extras_require or {}).items():
data.append('\n\n[%s]\n%s' % (extra, '\n'.join(yield_lines(reqs))))
cmd.write_or_delete_file("requirements", filename, ''.join(data))
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[k.split('.',1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(pkgs)+'\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value)+'\n'
cmd.write_or_delete_file(argname, filename, value, force)
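# Example sketch (not in the original module): for basename 'namespace_packages.txt',
# write_arg() looks up cmd.distribution.namespace_packages and, when it is set,
# writes one package name per line; write_or_delete_file() handles the empty and
# orphaned-file cases.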
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep,basestring) or ep is None:
data = ep
    else:
data = []
for section, contents in ep.items():
if not isinstance(contents,basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(map(str,contents.values()))
data.append('[%s]\n%s\n\n' % (section,contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
# See if we can get a -r### off of PKG-INFO, in case this is an sdist of
# a subversion revision
#
if os.path.exists('PKG-INFO'):
f = open('PKG-INFO','rU')
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0
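def _pkg_info_revision_example(line="Version: 0.6c9-r66101\n"):
    # Illustrative sketch, not part of setuptools: shows what the regular
    # expression in get_pkg_info_revision() extracts from a hypothetical
    # PKG-INFO "Version:" line. It would return 66101 for the line above
    # and 0 when nothing matches.
    match = re.match(r"Version:.*-r(\d+)\s*$", line)
    return int(match.group(1)) if match else 0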
#
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
import pymongo
class Migration(SchemaMigration):
def forwards(self, orm):
from moocng.mongodb import get_db
db = get_db()
activity = db.get_collection('groups')
activity.create_index([('id_course', pymongo.ASCENDING)])
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254'})
},
'badges.alignment': {
'Meta': {'object_name': 'Alignment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'badges.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.announcement': {
'Meta': {'ordering': "('-datetime',)", 'object_name': 'Announcement'},
'content': ('tinymce.models.HTMLField', [], {}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']", 'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'courses.attachment': {
'Meta': {'object_name': 'Attachment'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"})
},
'courses.course': {
'Meta': {'ordering': "['order']", 'object_name': 'Course'},
'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'to': "orm['badges.Badge']"}),
'created_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'courses_created_of'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['courses.Course']"}),
'description': ('tinymce.models.HTMLField', [], {}),
'ects': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
'estimated_effort': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '350', 'null': 'True', 'blank': 'True'}),
'hashtag': ('django.db.models.fields.CharField', [], {'default': "'Hashtag'", 'max_length': '128'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'is_activity_clonable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['courses.Language']", 'symmetrical': 'False'}),
'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'max_mass_emails_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'static_page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['courses.StaticPage']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'through': "orm['courses.CourseStudent']", 'to': "orm['auth.User']"}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_alt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_score': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'courses.coursestudent': {
'Meta': {'object_name': 'CourseStudent'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'old_course_status': ('django.db.models.fields.CharField', [], {'default': "'f'", 'max_length': '1'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rate': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courses.courseteacher': {
'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courses.knowledgequantum': {
'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'unit'),)", 'object_name': 'KnowledgeQuantum'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'supplementary_material': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'teacher_comments': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Unit']"}),
'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'courses.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'courses.option': {
'Meta': {'unique_together': "(('question', 'x', 'y'),)", 'object_name': 'Option'},
'feedback': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'optiontype': ('django.db.models.fields.CharField', [], {'default': "'t'", 'max_length': '1'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Question']"}),
'solution': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '100'}),
'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'courses.question': {
'Meta': {'object_name': 'Question'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']", 'unique': 'True'}),
'last_frame': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'solution_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'solution_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'solution_text': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'use_last_frame': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'courses.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'body': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'courses.unit': {
'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'course'),)", 'object_name': 'Unit'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unittype': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
}
}
complete_apps = ['courses']
|
|
import os, rarfile, zipfile, tarfile, re, sys
import comics
from shutil import copyfile
from django.conf import settings
from operator import attrgetter
from . import fnameparser
from . import utils
from urllib.parse import quote
class ComicFileHandler(object):
def __init__(self):
# Set the unrar tool based on filesystem
if os.getenv('TENMA_UNRAR_PATH'):
rarfile.UNRAR_TOOL = os.getenv('TENMA_UNRAR_PATH')
elif sys.platform == 'win32': # Windows
rarfile.UNRAR_TOOL = os.path.dirname(comics.__file__) + "/utils/unrar/unrar.exe"
elif sys.platform == 'darwin': # Mac
rarfile.UNRAR_TOOL = os.path.dirname(comics.__file__) + "/utils/unrar/unrar_mac"
elif sys.platform == 'linux': # Linux
rarfile.UNRAR_TOOL = os.path.dirname(comics.__file__) + "/utils/unrar/unrar-nonfree_ubuntu"
#==================================================================================================
def extract_comic(self, file, id):
'''
Extract all the pages from a comic book file.
Returns a dictionary containing the mediaurl and a list of files.
'''
filename = os.path.basename(file)
ext = os.path.splitext(filename)[1].lower()
mediaroot = settings.MEDIA_ROOT + '/temp/'
mediaurl = settings.MEDIA_URL + 'temp/' + str(id) + '/'
temppath = mediaroot + str(id)
tempfile = mediaroot + filename
# File validation
if utils.valid_comic_file(filename):
# If directory already exists, return it.
# Otherwise, create the directory.
if os.path.isdir(temppath):
                if os.listdir(temppath):
pages = self._get_file_list(temppath)
return {'mediaurl': mediaurl, 'pages': pages}
else:
os.mkdir(temppath)
# Create temp file if not found.
if not os.path.isfile(tempfile):
copyfile(file, tempfile)
os.chmod(tempfile, 0o777)
if ext == '.pdf':
utils.extract_images_from_PDF(file, temppath)
else:
# Change extension if needed
comic_file = self.normalise_comic_extension(tempfile)
# Get extractor
extractor = self.get_extractor(comic_file)
extractor.extractall(path=temppath)
                if ext in ('.zip', '.cbz'):
extractor.close()
# Delete the file after extraction so that space isn't wasted.
if os.path.isfile(tempfile):
os.remove(tempfile)
elif os.path.isfile(comic_file):
os.remove(comic_file)
# Get a list of pages
pages = self._get_file_list(temppath)
for root, dirs, files in os.walk(temppath):
for file in files:
if utils.valid_image_file(file):
image_path = root + '/' + file
utils.optimize_image(image_path, 75, 1920)
return {'mediaurl': mediaurl, 'pages': pages}
#==================================================================================================
def extract_cover(self, file):
'''
Extract the cover image from a comic file.
Returns a path to the cover image.
'''
filename = os.path.basename(file)
ext = os.path.splitext(filename)[1].lower()
mediaroot = settings.MEDIA_ROOT + '/images/'
mediaurl = 'media/images/'
tempfile = mediaroot + filename
cover = ''
# File validation
if utils.valid_comic_file(filename):
# Copy file to temp directory
copyfile(file, tempfile)
os.chmod(tempfile, 0o777)
if ext == '.pdf':
cover = utils.extract_first_image_from_PDF(file, mediaroot)
cover = mediaurl + cover
else:
# Change extension if needed
comic_file = self.normalise_comic_extension(tempfile)
# Get extractor
extractor = self.get_extractor(comic_file)
# Get cover file name
first_image = self._get_first_image(extractor.namelist())
normalised_file = self._normalise_image_name(first_image)
cover_filename = os.path.splitext(normalised_file)[0] + '-' + os.path.splitext(filename)[0] + os.path.splitext(normalised_file)[1]
# Delete existing cover if it exists
self._delete_existing_cover(mediaroot + cover_filename)
# Extract, rename, and optimize cover image
extractor.extract(first_image, path=mediaroot)
os.rename(mediaroot + first_image, mediaroot + cover_filename)
cover = mediaurl + cover_filename
# Close out zip extractor
                if ext in ('.zip', '.cbz'):
extractor.close()
# Optimize cover image
utils.optimize_image(cover, 75, 540)
# Delete the temp comic file
if os.path.isfile(tempfile):
os.remove(tempfile)
elif os.path.isfile(comic_file):
os.remove(comic_file)
return cover
#==================================================================================================
def get_page_count(self, file):
page_count = 0
filename = os.path.basename(file)
ext = os.path.splitext(filename)[1].lower()
mediaroot = settings.MEDIA_ROOT + '/images/'
tempfile = mediaroot + filename
# File validation
if utils.valid_comic_file(filename):
# Copy file to temp directory
copyfile(file, tempfile)
os.chmod(tempfile, 0o777)
if ext == '.pdf':
page_count = utils.get_PDF_page_count(file)
else:
# Change extension if needed
comic_file = self.normalise_comic_extension(tempfile)
# Get extractor
extractor = self.get_extractor(comic_file)
for file in extractor.infolist():
if utils.valid_image_file(file.filename):
page_count += 1
# Close out zip extractor
                if ext in ('.zip', '.cbz'):
extractor.close()
# Delete the temp comic file
if os.path.isfile(tempfile):
os.remove(tempfile)
elif os.path.isfile(comic_file):
os.remove(comic_file)
return page_count
#==================================================================================================
def _get_file_list(self, filepath):
'''
Returns a sorted list of image files for a comic. Filenames are changed
to numbers so filepaths stay short.
'''
pages = []
for root, dirs, files in os.walk(filepath):
sorted_files = sorted(files)
i = 0
for file in sorted_files:
if utils.valid_image_file(file):
file_ext = os.path.splitext(file)[1].lower()
path = os.path.join(root,file)
numbered_file = "%03d" % (i,) + file_ext
os.rename(path, filepath + '/' + numbered_file)
i += 1
newpath = numbered_file.replace(filepath + '/', '')
if os.name == 'nt':
newpath = numbered_file.replace(filepath + '\\', '')
pages.append(quote(newpath))
return pages
#==================================================================================================
def _get_first_image(self, filelist):
''' Returns the name of the first file from a sorted list. '''
sorted_list = sorted(filelist)
for file in sorted_list:
if utils.valid_image_file(file):
return file
#==================================================================================================
def _delete_existing_cover(self, filepath):
''' Deletes cover image if found. '''
if os.path.isfile(filepath):
os.chmod(filepath, 0o777)
os.remove(filepath)
#==================================================================================================
def _normalise_image_name(self, filepath):
''' Returns a normalised image name. '''
path_normalise = re.compile(r"[/\\]{1,}")
filename = path_normalise.sub("`", filepath).split('`')[-1]
return filename
#==================================================================================================
def normalise_comic_extension(self, comic_file):
''' Set correct extension if necessary '''
ext = os.path.splitext(comic_file)[1].lower()
c = comic_file
if ext == '.cbr':
c = c.replace('.cbr', '.rar')
elif ext == '.cbz':
c = c.replace('.cbz', '.zip')
elif ext == '.cbt':
c = c.replace('.cbt', '.tar')
os.rename(comic_file, c)
return c
#==================================================================================================
def get_extractor(self, comic_file):
''' Return extractor based on file extension '''
# Get extractor
ext = os.path.splitext(comic_file)[1].lower()
e = None
if ext == '.rar':
e = rarfile.RarFile(comic_file)
if ext == '.zip':
e = zipfile.ZipFile(comic_file)
if ext == '.tar':
e = tarfile.TarFile(comic_file)
return e
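# Illustrative usage sketch, not part of the original handler: shows how the
# extension normalisation and extractor selection are meant to chain together.
# The path below is hypothetical and must point to an existing archive, and
# Django settings must be configured, as for the rest of this module.
def _example_get_extractor(path='/tmp/issue-001.cbz'):
    handler = ComicFileHandler()
    archive = handler.normalise_comic_extension(path)  # renames '.cbz' -> '.zip'
    return handler.get_extractor(archive)              # returns a zipfile.ZipFile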
|
|
from flask import Flask, request, redirect, url_for, jsonify, session, render_template, abort
from functools import wraps
import requests
import urllib.parse
import os
import json
import sys
SETTINGS_FILENAME = 'loco.json'
if not os.path.exists(SETTINGS_FILENAME):
print("Cannot find settings file `{}`".format(SETTINGS_FILENAME))
sys.exit(1)
app_settings = json.load(open(SETTINGS_FILENAME))
app = Flask(__name__)
app.config['RC_API_URI'] = 'http://www.recurse.com/api/v1'
app.config['RC_OAUTH_AUTH_URI'] = 'https://www.recurse.com/oauth/authorize'
app.config['RC_OAUTH_TOKEN_URI'] = 'https://www.recurse.com/oauth/token'
app.config['RC_OAUTH_CLIENT_ID'] = app_settings['client_id']
app.config['RC_OAUTH_CLIENT_SECRET'] = app_settings['client_secret']
app.config['RC_OAUTH_CLIENT_REDIRECT_URI'] = app_settings['redirect_uri']
app.config['SESSION_SECRET'] = app_settings['session_secret']
app.secret_key = app.config['SESSION_SECRET']
# DB Stuff
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2.types import Geometry
from geoalchemy2.elements import WKTElement, WKBElement
from geoalchemy2.shape import to_shape
# TODO: Read database credentials from file, env variables etc.
app.config['SQLALCHEMY_DATABASE_URI'] = app_settings["db_uri"]
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class RcLoco(db.Model):
__tablename__ = 'locos'
id = db.Column(db.BigInteger, primary_key=True)
name = db.Column(db.String(256))
image = db.Column(db.Text)
address = db.Column(db.Text)
coords = db.Column(Geometry(geometry_type='POINT', srid=4326))
is_shared = db.Column(db.Boolean, default=False)
class ModelSerializer(json.JSONEncoder):
OMITTED_KEYS = {'_sa_instance_state'}
def default(self, o):
try:
d = {}
for k, v in o.__dict__.items():
if k not in self.OMITTED_KEYS:
if k == "coords" and isinstance(v, WKTElement) \
or isinstance(v, WKBElement):
lat, lng = list(to_shape(v).coords)[0]
d['lat'] = lat
d['lng'] = lng
else:
d[k] = v
return d
except AttributeError:
return {}
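# Illustrative sketch, not part of the original app: how ModelSerializer is
# meant to be used. The RcLoco instance below is hypothetical and never
# persisted; the point ordering follows the same 'POINT(lat lng)' convention
# used by update_loco() further down.
def _model_serializer_example():
    loco = RcLoco(
        id=1,
        name='Example Recurser',
        image='',
        coords=WKTElement('POINT(40.69 -73.98)', srid=4326),
        is_shared=True,
    )
    # The geometry column is flattened into 'lat'/'lng' keys in the output.
    return json.dumps(loco, cls=ModelSerializer)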
# App
sessions = {}
def check_authentication(method):
@wraps(method)
def auth_checked(*args, **kwargs):
if 'user' not in session or session['user'] not in sessions:
params = {
"client_id": app.config['RC_OAUTH_CLIENT_ID'],
"redirect_uri": app.config['RC_OAUTH_CLIENT_REDIRECT_URI'],
"response_type": 'code'
}
url_params = urllib.parse.urlencode(params)
oauth_url = "%s?%s" % (app.config['RC_OAUTH_AUTH_URI'], url_params)
return redirect(oauth_url)
else:
# User already authenticated
return method(*args, **kwargs)
return auth_checked
@app.route('/', defaults={'lat':None, 'lng':None})
@app.route('/share/<lat>/<lng>')
@check_authentication
def index(lat, lng):
return render_template('index.html')
@app.route('/token', methods=['GET', 'POST'])
def access_token():
code = request.args.get('code')
params = {
'client_id': app.config['RC_OAUTH_CLIENT_ID'],
'client_secret': app.config['RC_OAUTH_CLIENT_SECRET'],
'redirect_uri': app.config['RC_OAUTH_CLIENT_REDIRECT_URI'],
'grant_type': 'authorization_code',
'code': code
}
req = requests.post(app.config['RC_OAUTH_TOKEN_URI'], data=params)
data = json.loads(req.text)
if 'access_token' in data:
token = data['access_token']
user = get_user(token)
# save user session
user_id = user['id']
session['user'] = user_id
sessions[user_id] = token
return redirect(url_for('index'))
else:
abort(401, 'Go away!')
def make_header(access_token):
headers = {
'Authorization': 'Bearer %s' % access_token,
'Accepts' : 'application/json'
}
return headers
def serialize(method):
@wraps(method)
def serialized_response(*args, **kwargs):
response = method(*args, **kwargs)
if hasattr(response, 'content_type'):
# Don't attempt to serialize Werkzeug response wrappers
return response
return json.dumps(response, cls=ModelSerializer)
return serialized_response
@app.route('/locos', methods=['GET'])
@serialize
@check_authentication
def get_locos():
locos = RcLoco.query.filter_by(is_shared=True).filter(RcLoco.coords!=None).all()
return locos
@app.route('/update', methods=['POST'])
@serialize
@check_authentication
def update_loco():
'''Update user location info'''
location_data = request.get_json()
if not location_data:
abort(400)
    if 'lat' not in location_data or 'lng' not in location_data:
abort(400)
lat = location_data['lat']
lng = location_data['lng']
loco_id = session['user']
loco = RcLoco.query.filter_by(id=loco_id).first()
loco.coords = WKTElement('POINT({} {})'.format(lat, lng),
srid=4326)
loco.is_shared = True
db.session.add(loco)
db.session.commit()
return {"result": "success"}
@app.route('/update-share', methods=['PUT'])
@serialize
@check_authentication
def update_share():
'''Update user sharing options'''
new_sharing = request.get_json()
loco_id = session['user']
loco = RcLoco.query.filter_by(id=loco_id).first()
loco.is_shared = new_sharing['isShared']
db.session.add(loco)
db.session.commit()
return {"result": "success"}
@app.route('/is-shared', methods=['GET'])
@serialize
@check_authentication
def is_shared():
loco_id = session['user']
u = RcLoco.query.filter_by(id=loco_id).first()
return {'isShared': u.is_shared}
# def get_batch(access_token, batch_id):
# headers = make_header(access_token)
# req = requests.get("%s/batches/%d/people" % (app.config['RC_API_URI'], batch_id), headers=headers)
# return req.json()
# def get_batches(access_token):
# headers = make_header(access_token)
# req = requests.get("%s/batches" % app.config['RC_API_URI'], headers=headers)
# return json.loads(req.text)
def get_user(access_token):
headers = make_header(access_token)
req = requests.get("%s/people/me" % app.config['RC_API_URI'], headers=headers)
u = req.json()
# If user data is not saved in our db, save
if 'id' not in u:
abort(401)
loco = RcLoco.query.get(int(u['id']))
if not loco:
name = "{} {}".format(u['first_name'], u['last_name'])
loco = RcLoco(
id=u['id'],
name=name,
image=u['image'],
)
db.session.add(loco)
db.session.commit()
return u
if __name__ == '__main__':
app.debug = True
app.secret_key = app.config['SESSION_SECRET']
port = 5000
app.run(host='0.0.0.0', port=port)
|
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import six
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import ListConverter, JavaArray, JavaList, JavaMap, MapConverter
from py4j.java_gateway import JavaGateway, GatewayClient
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
from pyspark.files import SparkFiles
import numpy as np
import threading
import tempfile
import traceback
from bigdl.util.engine import get_bigdl_classpath, is_spark_below_2_2
INTMAX = 2147483647
INTMIN = -2147483648
DOUBLEMAX = 1.7976931348623157E308
if sys.version >= '3':
long = int
unicode = str
class SingletonMixin(object):
_lock = threading.RLock()
_instance = None
@classmethod
def instance(cls,
bigdl_type, *args):
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = cls(bigdl_type, *args)
return cls._instance
class GatewayWrapper(SingletonMixin):
def __init__(self, bigdl_type, port=25333):
self.value = JavaGateway(GatewayClient(port=port), auto_convert=True)
class JavaCreator(SingletonMixin):
__creator_class=["com.intel.analytics.bigdl.python.api.PythonBigDLKeras"]
@classmethod
def add_creator_class(cls, jinvoker):
with JavaCreator._lock:
JavaCreator.__creator_class.append(jinvoker)
JavaCreator._instance = None
@classmethod
def get_creator_class(cls):
with JavaCreator._lock:
return JavaCreator.__creator_class
@classmethod
def set_creator_class(cls, cclass):
if isinstance(cclass, six.string_types):
cclass = [cclass]
with JavaCreator._lock:
JavaCreator.__creator_class = cclass
JavaCreator._instance = None
def __init__(self, bigdl_type, gateway):
self.value = []
for creator_class in JavaCreator.get_creator_class():
jclass = getattr(gateway.jvm, creator_class)
if bigdl_type == "float":
self.value.append(getattr(jclass, "ofFloat")())
elif bigdl_type == "double":
self.value.append(getattr(jclass, "ofDouble")())
else:
raise Exception("Not supported bigdl_type: %s" % bigdl_type)
class JavaValue(object):
def jvm_class_constructor(self):
name = "create" + self.__class__.__name__
print("creating: " + name)
return name
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, self.jvm_class_constructor(), *args)
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString()
class EvaluatedResult():
"""
A testing result used to benchmark the model quality.
"""
def __init__(self, result, total_num, method):
"""
:param result: the validation result. i.e: top1 accuracy percentage.
:param total_num: the total processed records.
:param method: the validation method. i.e: Top1Accuracy
"""
self.result = result
self.total_num = total_num
self.method = method
def __reduce__(self):
return (EvaluatedResult, (self.result, self.total_num, self.method))
def __str__(self):
return "Evaluated result: %s, total_num: %s, method: %s" % (
self.result, self.total_num, self.method)
def get_dtype(bigdl_type):
# Always return float32 for now
return "float32"
class JActivity(object):
def __init__(self, value):
self.value = value
class JTensor(object):
"""
    A wrapper to ease our work when we need to pass or return a Tensor to/from Scala.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> np.random.seed(123)
>>>
"""
def __init__(self, storage, shape, bigdl_type="float", indices=None):
"""
:param storage: values in this tensor
:param shape: shape of this tensor
:param bigdl_type: numeric type
:param indices: if indices is provided, means this is a SparseTensor;
if not provided, means this is a DenseTensor
"""
if isinstance(storage, bytes) and isinstance(shape, bytes):
self.storage = np.frombuffer(storage, dtype=get_dtype(bigdl_type))
self.shape = np.frombuffer(shape, dtype=np.int32)
else:
self.storage = np.array(storage, dtype=get_dtype(bigdl_type))
self.shape = np.array(shape, dtype=np.int32)
if indices is None:
self.indices = None
elif isinstance(indices, bytes):
self.indices = np.frombuffer(indices, dtype=np.int32)
else:
assert isinstance(indices, np.ndarray), \
"indices should be a np.ndarray, not %s, %s" % (type(a_ndarray), str(indices))
self.indices = np.array(indices, dtype=np.int32)
self.bigdl_type = bigdl_type
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert a ndarray to a DenseTensor which would be used in Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
>>> expected_shape = np.array([2, 3])
>>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size),
bigdl_type)
@classmethod
def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type="float"):
"""
        Convert three ndarrays to a SparseTensor which would be used in Java side.
For example:
a_ndarray = [1, 3, 2, 4]
i_ndarray = [[0, 0, 1, 2],
[0, 3, 2, 1]]
shape = [3, 4]
        which together represent the dense tensor
[[ 1, 0, 0, 3],
[ 0, 0, 2, 0],
[ 0, 4, 0, 0]]
:param a_ndarray non-zero elements in this SparseTensor
:param i_ndarray zero-based indices for non-zero element
i_ndarray's shape should be (shape.size, a_ndarray.size)
            and the indices of the i-th non-zero element are i_ndarray[:, i]
:param shape shape as a DenseTensor.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.arange(1, 7).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> result = JTensor.sparse(data, indices, shape)
>>> expected_storage = np.array([1., 2., 3., 4., 5., 6.])
>>> expected_shape = np.array([10])
>>> expected_indices = np.array([1, 2, 3, 4, 5, 6])
>>> np.testing.assert_allclose(result.storage, expected_storage)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> np.testing.assert_allclose(result.indices, expected_indices)
>>> tensor1 = callBigDlFunc("float", "testTensor", result) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> expected_ndarray = np.array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> (array_from_tensor == expected_ndarray).all()
True
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"values array should be a np.ndarray, not %s" % type(a_ndarray)
assert isinstance(i_ndarray, np.ndarray), \
"indices array should be a np.ndarray, not %s" % type(a_ndarray)
assert i_ndarray.size == a_ndarray.size * shape.size, \
"size of values and indices should match."
return cls(a_ndarray,
shape,
bigdl_type,
i_ndarray)
def to_ndarray(self):
"""
Transfer JTensor to ndarray.
        As a SparseTensor may generate a very big ndarray, this function is not supported for SparseTensor.
:return: a ndarray
"""
assert self.indices is None, "sparseTensor to ndarray is not supported"
return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape) # noqa
def __reduce__(self):
if self.indices is None:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type)
else:
return JTensor, (self.storage.tostring(), self.shape.tostring(), self.bigdl_type, self.indices.tostring())
def __str__(self):
return self.__repr__()
def __repr__(self):
indices = "" if self.indices is None else " ,indices %s" % str(self.indices)
return "JTensor: storage: %s, shape: %s%s, %s" % (str(self.storage), str(self.shape), indices, self.bigdl_type)
class Sample(object):
def __init__(self, features, labels, bigdl_type="float"):
"""
User should always use Sample.from_ndarray to construct Sample.
:param features: a list of JTensors
:param labels: a list of JTensors
:param bigdl_type: "double" or "float"
"""
self.feature = features[0]
self.features = features
self.label = labels[0]
self.bigdl_type = bigdl_type
self.labels = labels
@classmethod
def from_ndarray(cls, features, labels, bigdl_type="float"):
"""
Convert a ndarray of features and labels to Sample, which would be used in Java side.
:param features: an ndarray or a list of ndarrays
:param labels: an ndarray or a list of ndarrays or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> from bigdl.util.common import callBigDlFunc
>>> from numpy.testing import assert_allclose
>>> np.random.seed(123)
>>> sample = Sample.from_ndarray(np.random.random((2,3)), np.random.random((2,3)))
>>> sample_back = callBigDlFunc("float", "testSample", sample)
>>> assert_allclose(sample.features[0].to_ndarray(), sample_back.features[0].to_ndarray())
>>> assert_allclose(sample.label.to_ndarray(), sample_back.label.to_ndarray())
>>> expected_feature_storage = np.array(([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]]))
>>> expected_feature_shape = np.array([2, 3])
>>> expected_label_storage = np.array(([[0.98076421, 0.68482971, 0.48093191], [0.39211753, 0.343178, 0.72904968]]))
>>> expected_label_shape = np.array([2, 3])
>>> assert_allclose(sample.features[0].storage, expected_feature_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.features[0].shape, expected_feature_shape)
>>> assert_allclose(sample.labels[0].storage, expected_label_storage, rtol=1e-6, atol=1e-6)
>>> assert_allclose(sample.labels[0].shape, expected_label_shape)
"""
if isinstance(features, np.ndarray):
features = [features]
else:
assert all(isinstance(feature, np.ndarray) for feature in features), \
"features should be a list of np.ndarray, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [np.array(labels)]
elif isinstance(labels, np.ndarray):
labels = [labels]
else:
assert all(isinstance(label, np.ndarray) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=[JTensor.from_ndarray(feature) for feature in features],
labels=[JTensor.from_ndarray(label) for label in labels],
bigdl_type=bigdl_type)
@classmethod
def from_jtensor(cls, features, labels, bigdl_type="float"):
"""
Convert a sequence of JTensor to Sample, which would be used in Java side.
        :param features: a JTensor or a list of JTensor
        :param labels: a JTensor or a list of JTensor or a scalar
:param bigdl_type: "double" or "float"
>>> import numpy as np
>>> data = np.random.uniform(0, 1, (6)).astype("float32")
>>> indices = np.arange(1, 7)
>>> shape = np.array([10])
>>> feature0 = JTensor.sparse(data, indices, shape)
>>> feature1 = JTensor.from_ndarray(np.random.uniform(0, 1, (2, 3)).astype("float32"))
>>> sample = Sample.from_jtensor([feature0, feature1], 1)
"""
if isinstance(features, JTensor):
features = [features]
else:
assert all(isinstance(feature, JTensor) for feature in features), \
"features should be a list of JTensor, not %s" % type(features)
if np.isscalar(labels): # in case labels is a scalar.
labels = [JTensor.from_ndarray(np.array(labels))]
elif isinstance(labels, JTensor):
labels = [labels]
else:
assert all(isinstance(label, JTensor) for label in labels), \
"labels should be a list of np.ndarray, not %s" % type(labels)
return cls(
features=features,
labels=labels,
bigdl_type=bigdl_type)
def __reduce__(self):
return Sample, (self.features, self.labels, self.bigdl_type)
def __str__(self):
return "Sample: features: %s, labels: %s," % (self.features, self.labels)
def __repr__(self):
return "Sample: features: %s, labels: %s" % (self.features, self.labels)
class RNG():
"""
generate tensor data with seed
"""
def __init__(self, bigdl_type="float"):
self.bigdl_type = bigdl_type
def set_seed(self, seed):
callBigDlFunc(self.bigdl_type, "setModelSeed", seed)
def uniform(self, a, b, size):
return callBigDlFunc(self.bigdl_type, "uniform", a, b, size).to_ndarray() # noqa
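def _rng_example():
    """
    Illustrative sketch only, not part of the original API: draws a 2x3
    ndarray of uniform samples with a fixed seed. Requires init_engine()
    to have been called and the JVM gateway to be running.
    """
    rng = RNG()
    rng.set_seed(42)
    return rng.uniform(0.0, 1.0, [2, 3])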
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'Sample',
'EvaluatedResult',
'JTensor',
'JActivity'
]
def init_engine(bigdl_type="float"):
callBigDlFunc(bigdl_type, "initEngine")
# Spark context is supposed to have been created when init_engine is called
get_spark_context()._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.initialize()
def init_executor_gateway(sc, bigdl_type="float"):
callBigDlFunc(bigdl_type, "initExecutorGateway", sc, sc._gateway._gateway_client.port)
def get_node_and_core_number(bigdl_type="float"):
result = callBigDlFunc(bigdl_type, "getNodeAndCoreNumber")
return result[0], result[1]
def redire_spark_logs(bigdl_type="float", log_path=os.getcwd()+"/bigdl.log"):
"""
Redirect spark logs to the specified path.
:param bigdl_type: "double" or "float"
    :param log_path: the file path to redirect the logs to; by default it is `bigdl.log` under the current working directory.
"""
callBigDlFunc(bigdl_type, "redirectSparkLogs", log_path)
def show_bigdl_info_logs(bigdl_type="float"):
"""
Set BigDL log level to INFO.
:param bigdl_type: "double" or "float"
"""
callBigDlFunc(bigdl_type, "showBigDlInfoLogs")
def get_bigdl_conf():
bigdl_conf_file = "spark-bigdl.conf"
bigdl_python_wrapper = "python-api.zip"
def load_conf(conf_str):
return dict(line.split() for line in conf_str.split("\n") if
"#" not in line and line.strip())
for p in sys.path:
if bigdl_conf_file in p and os.path.isfile(p):
with open(p) if sys.version_info < (3,) else open(p, encoding='latin-1') as conf_file: # noqa
return load_conf(conf_file.read())
if bigdl_python_wrapper in p and os.path.isfile(p):
import zipfile
with zipfile.ZipFile(p, 'r') as zip_conf:
if bigdl_conf_file in zip_conf.namelist():
content = zip_conf.read(bigdl_conf_file)
if sys.version_info >= (3,):
content = str(content, 'latin-1')
return load_conf(content)
return {}
def to_list(a):
if type(a) is list:
return a
return [a]
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
    :param numSlices: the number of partitions to use when parallelizing x and y
    :return: an RDD of Sample, pairing each row of x with the corresponding row of y
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))
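def _to_sample_rdd_example():
    """
    Illustrative sketch only, not part of the original API: wraps two small
    numpy arrays as an RDD[Sample]. Assumes a SparkContext can be created in
    this environment and that init_engine() has already been called.
    """
    features = np.random.random((4, 3))
    labels = np.random.random((4, 1))
    return to_sample_rdd(features, labels, numSlices=2)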
def extend_spark_driver_cp(sparkConf, path):
original_driver_classpath = ":" + sparkConf.get("spark.driver.extraClassPath") \
if sparkConf.contains("spark.driver.extraClassPath") else ""
sparkConf.set("spark.driver.extraClassPath", path + original_driver_classpath)
def create_spark_conf():
bigdl_conf = get_bigdl_conf()
sparkConf = SparkConf()
sparkConf.setAll(bigdl_conf.items())
if os.environ.get("BIGDL_JARS", None) and not is_spark_below_2_2():
for jar in os.environ["BIGDL_JARS"].split(":"):
extend_spark_driver_cp(sparkConf, jar)
# add content in PYSPARK_FILES in spark.submit.pyFiles
# This is a workaround for current Spark on k8s
python_lib = os.environ.get('PYSPARK_FILES', None)
if python_lib:
existing_py_files = sparkConf.get("spark.submit.pyFiles")
if existing_py_files:
sparkConf.set(key="spark.submit.pyFiles", value="%s,%s" % (python_lib, existing_py_files))
else:
sparkConf.set(key="spark.submit.pyFiles", value=python_lib)
return sparkConf
def get_spark_context(conf=None):
"""
Get the current active spark context and create one if no active instance
:param conf: combining bigdl configs into spark conf
:return: SparkContext
"""
if hasattr(SparkContext, "getOrCreate"):
with SparkContext._lock:
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext.getOrCreate(spark_conf)
else:
return SparkContext.getOrCreate()
else:
        # Might have a threading issue but we can't add _lock here
# as it's not RLock in spark1.5;
if SparkContext._active_spark_context is None:
spark_conf = create_spark_conf() if conf is None else conf
return SparkContext(conf=spark_conf)
else:
return SparkContext._active_spark_context
def get_spark_sql_context(sc):
if "getOrCreate" in SQLContext.__dict__:
return SQLContext.getOrCreate(sc)
else:
return SQLContext(sc) # Compatible with Spark1.5.1
def _get_port():
root_dir = SparkFiles.getRootDirectory()
path = os.path.join(root_dir, "gateway_port")
try:
with open(path) as f:
port = int(f.readline())
except IOError as e:
traceback.print_exc()
raise RuntimeError("Could not open the file %s, which contains the listening port of"
" local Java Gateway, please make sure the init_executor_gateway()"
" function is called before any call of java function on the"
" executor side." % e.filename)
return port
def _get_gateway():
if SparkFiles._is_running_on_worker:
gateway_port = _get_port()
gateway = GatewayWrapper.instance(None, gateway_port).value
else:
sc = get_spark_context()
gateway = sc._gateway
return gateway
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
        # hasattr(jinvoker, name) always returns true here,
        # so we need to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
result = callJavaFunc(api, *args)
except Exception as e:
error = e
if "does not exist" not in str(e):
raise e
else:
return result
raise error
def _java2py(gateway, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.javaToPython(r)
return RDD(jrdd, get_spark_context())
if clsName == 'DataFrame':
return DataFrame(r, get_spark_sql_context(get_spark_context()))
if clsName == 'Dataset':
return DataFrame(r, get_spark_sql_context(get_spark_context()))
if clsName == "ImageFrame[]":
return r
if clsName in _picklable_classes:
r = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList, JavaMap)):
try:
r = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(
r)
except Py4JJavaError:
pass # not pickable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(func, *args):
""" Call Java Function """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
result = func(*args)
return _java2py(gateway, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever
the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(gateway, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(gateway, x) for x in obj],
gateway._gateway_client)
elif isinstance(obj, dict):
result = {}
for (key, value) in obj.items():
result[key] = _py2java(gateway, value)
obj = MapConverter().convert(result, gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
def create_tmp_path():
tmp_file = tempfile.NamedTemporaryFile(prefix="bigdl")
tmp_file.close()
return tmp_file.name
def text_from_path(path):
sc = get_spark_context()
return sc.textFile(path).collect()[0]
def get_local_file(a_path):
if not is_distributed(a_path):
return a_path
path, data = get_spark_context().binaryFiles(a_path).collect()[0]
local_file_path = create_tmp_path()
with open(local_file_path, 'w') as local_file:
local_file.write(data)
return local_file_path
def is_distributed(path):
return "://" in path
def get_activation_by_name(activation_name, activation_id=None):
""" Convert to a bigdl activation layer
given the name of the activation as a string """
import bigdl.nn.layer as BLayer
activation = None
activation_name = activation_name.lower()
if activation_name == "tanh":
activation = BLayer.Tanh()
elif activation_name == "sigmoid":
activation = BLayer.Sigmoid()
elif activation_name == "hard_sigmoid":
activation = BLayer.HardSigmoid()
elif activation_name == "relu":
activation = BLayer.ReLU()
elif activation_name == "softmax":
activation = BLayer.SoftMax()
elif activation_name == "softplus":
activation = BLayer.SoftPlus(beta=1.0)
elif activation_name == "softsign":
activation = BLayer.SoftSign()
elif activation_name == "linear":
activation = BLayer.Identity()
else:
raise Exception("Unsupported activation type: %s" % activation_name)
    if activation_id:
        activation.set_name(activation_id)
return activation
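def _activation_example():
    """
    Illustrative sketch only, not part of the original API: resolves a couple
    of activation names to BigDL layers. Requires a running BigDL JVM gateway
    (i.e. init_engine() has been called).
    """
    relu = get_activation_by_name("relu", "my_relu")
    linear = get_activation_by_name("linear")
    return relu, linear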
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import layer
globs = layer.__dict__.copy()
sc = SparkContext(master="local[2]", appName="test common utility")
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
from tensorflow.python.util import tf_inspect
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer, thread_name_filter=None):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
bad_run_start_action: (str) bad action value to be returned during the
        on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
@test_util.run_deprecated_v1
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session(config=self._no_rewrite_session_config())
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
file_io.delete_recursively(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclasses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegexp(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegexp(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
def testUsingWrappedSessionShouldWorkAsContextManager(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper as sess:
self.assertAllClose([[3.0], [4.0]], self._s.eval())
self.assertEqual(1, self._observer["on_run_start_count"])
self.assertEqual(self._s, self._observer["run_fetches"])
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertAllClose(
[[11.0], [-1.0]],
sess.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(2, self._observer["on_run_start_count"])
self.assertEqual(self._q, self._observer["run_fetches"])
self.assertEqual(2, self._observer["on_run_end_count"])
def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, self.evaluate(foo))
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
wrapper.close()
def testWrapperThreadNameFilterMainThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter="MainThread")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterChildThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=r"Child.*")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterBothThreads(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=None)
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertEqual(2, dump.size)
self.assertItemsEqual(
["a_init", "b_init"],
[datum.node_name for datum in dump.dumped_tensor_data])
def _is_public_method_name(method_name):
return (method_name.startswith("__") and method_name.endswith("__")
or not method_name.startswith("_"))
class SessionWrapperPublicMethodParityTest(test_util.TensorFlowTestCase):
def testWrapperHasAllPublicMethodsOfSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(session.Session, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
def testWrapperHasAllPublicMethodsOfMonitoredSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(monitored_session.MonitoredSession,
predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
if __name__ == "__main__":
googletest.main()
|
|
import os, sys, time, urllib2, threading, hashlib, shutil, gzip
import xbmc, xbmcvfs, xbmcaddon
from StringIO import StringIO
from PIL import Image
from PIL import ImageEnhance
__addon__ = xbmcaddon.Addon()
__mainaddon__ = xbmcaddon.Addon('weather.openweathermap.extended')
__addonid__ = __addon__.getAddonInfo('id')
__cwd__ = __addon__.getAddonInfo('path').decode("utf-8")
__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ).encode("utf-8") ).decode("utf-8")
sys.path.append(__resource__)
from utils import *
ZOOM = int(__mainaddon__.getSetting('Zoom')) + 3
class Main:
def __init__(self):
set_property('Map.IsFetched', 'true')
lat, lon = self._parse_argv()
self._get_maps(lat, lon)
def _parse_argv(self):
try:
params = dict(arg.split('=') for arg in sys.argv[ 1 ].split('&'))
except:
params = {}
lat = params.get('lat', '')
lon = params.get('lon', '')
return lat, lon
def _get_maps(self, lat, lon):
md5 = hashlib.md5()
locationdeg = [lat, lon]
md5.update(str(locationdeg) + str(ZOOM))
tag = md5.hexdigest()
streetthread_created = False
stamp = int(time.time())
street_url = 'http://c.tile.openstreetmap.org/%i/%i/%i.png'
precip_url = 'http://undefined.tile.openweathermap.org/map/precipitation/%i/%i/%i.png'
clouds_url = 'http://undefined.tile.openweathermap.org/map/clouds/%i/%i/%i.png'
temp_url = 'http://undefined.tile.openweathermap.org/map/temp/%i/%i/%i.png'
wind_url = 'http://undefined.tile.openweathermap.org/map/wind/%i/%i/%i.png'
pressure_url = 'http://undefined.tile.openweathermap.org/map/pressure_cntr/%i/%i/%i.png'
streetmapdir = xbmc.translatePath('special://profile/addon_data/%s/maps/streetmap-%s/' % (__addonid__, tag))
precipmapdir = xbmc.translatePath('special://profile/addon_data/%s/maps/precipmap/' % __addonid__)
cloudsmapdir = xbmc.translatePath('special://profile/addon_data/%s/maps/cloudsmap/' % __addonid__)
tempmapdir = xbmc.translatePath('special://profile/addon_data/%s/maps/tempmap/' % __addonid__)
windmapdir = xbmc.translatePath('special://profile/addon_data/%s/maps/windmap/' % __addonid__)
pressuremapdir = xbmc.translatePath('special://profile/addon_data/%s/maps/pressuremap/' % __addonid__)
lat = float(lat)
lon = float(lon)
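# GET_TILE (imported from resources/lib/utils) is assumed to implement the
# standard OSM "slippy map" tile formula, roughly:
#   x = int((lon + 180.0) / 360.0 * 2**ZOOM)
#   y = int((1.0 - math.log(math.tan(math.radians(lat)) +
#            1.0 / math.cos(math.radians(lat))) / math.pi) / 2.0 * 2**ZOOM)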
x, y = GET_TILE(lat, lon, ZOOM)
imgs = [[x-1,y-1], [x,y-1], [x+1,y-1], [x-1,y], [x,y], [x+1,y], [x-1,y+1], [x,y+1], [x+1,y+1]]
# adjust for locations on the edge of the map
tile_max = 2**ZOOM - 1  # highest valid tile index at this zoom level
if x == 0:
imgs = [[tile_max,y-1], [x,y-1], [x+1,y-1], [tile_max,y], [x,y], [x+1,y], [tile_max,y+1], [x,y+1], [x+1,y+1]]
elif x == tile_max:
imgs = [[x-1,y-1], [x,y-1], [0,y-1], [x-1,y], [x,y], [0,y], [x-1,y+1], [x,y+1], [0,y+1]]
if y == 0:
imgs = [[x-1,tile_max], [x,tile_max], [x+1,tile_max], [x-1,y], [x,y], [x+1,y], [x-1,y+1], [x,y+1], [x+1,y+1]]
elif y == tile_max:
imgs = [[x-1,y-1], [x,y-1], [x+1,y-1], [x-1,y], [x,y], [x+1, y], [x-1,0], [x,0], [x+1,0]]
# delete old maps
if xbmcvfs.exists(precipmapdir):
shutil.rmtree(precipmapdir)
if xbmcvfs.exists(cloudsmapdir):
shutil.rmtree(cloudsmapdir)
if xbmcvfs.exists(tempmapdir):
shutil.rmtree(tempmapdir)
if xbmcvfs.exists(windmapdir):
shutil.rmtree(windmapdir)
if xbmcvfs.exists(pressuremapdir):
shutil.rmtree(pressuremapdir)
if not xbmcvfs.exists(streetmapdir):
xbmcvfs.mkdirs(streetmapdir)
# download the streetmap once, unless location or zoom has changed
if not xbmcvfs.exists(os.path.join(streetmapdir, 'streetmap.png')):
thread_street = get_tiles(streetmapdir, 'streetmap.png', stamp, imgs, street_url)
thread_street.start()
streetthread_created = True
if not xbmcvfs.exists(precipmapdir):
xbmcvfs.mkdirs(precipmapdir)
thread_precip = get_tiles(precipmapdir, 'precipmap-%s.png', stamp, imgs, precip_url)
thread_precip.start()
if not xbmcvfs.exists(cloudsmapdir):
xbmcvfs.mkdirs(cloudsmapdir)
thread_clouds = get_tiles(cloudsmapdir, 'cloudsmap-%s.png', stamp, imgs, clouds_url)
thread_clouds.start()
if not xbmcvfs.exists(tempmapdir):
xbmcvfs.mkdirs(tempmapdir)
thread_temp = get_tiles(tempmapdir, 'tempmap-%s.png', stamp, imgs, temp_url)
thread_temp.start()
if not xbmcvfs.exists(windmapdir):
xbmcvfs.mkdirs(windmapdir)
thread_wind = get_tiles(windmapdir, 'windmap-%s.png', stamp, imgs, wind_url)
thread_wind.start()
if not xbmcvfs.exists(pressuremapdir):
xbmcvfs.mkdirs(pressuremapdir)
thread_pressure = get_tiles(pressuremapdir, 'pressuremap-%s.png', stamp, imgs, pressure_url)
thread_pressure.start()
if streetthread_created:
thread_street.join()
thread_precip.join()
thread_clouds.join()
thread_temp.join()
thread_wind.join()
thread_pressure.join()
set_property('Map.1.Area', xbmc.translatePath('special://profile/addon_data/%s/maps/streetmap-%s/streetmap.png' % (__addonid__, tag)))
set_property('Map.2.Area', xbmc.translatePath('special://profile/addon_data/%s/maps/streetmap-%s/streetmap.png' % (__addonid__, tag)))
set_property('Map.3.Area', xbmc.translatePath('special://profile/addon_data/%s/maps/streetmap-%s/streetmap.png' % (__addonid__, tag)))
set_property('Map.4.Area', xbmc.translatePath('special://profile/addon_data/%s/maps/streetmap-%s/streetmap.png' % (__addonid__, tag)))
set_property('Map.5.Area', xbmc.translatePath('special://profile/addon_data/%s/maps/streetmap-%s/streetmap.png' % (__addonid__, tag)))
set_property('Map.1.Layer', xbmc.translatePath('special://profile/addon_data/%s/maps/precipmap/precipmap-%s.png' % (__addonid__, stamp)))
set_property('Map.2.Layer', xbmc.translatePath('special://profile/addon_data/%s/maps/cloudsmap/cloudsmap-%s.png' % (__addonid__, stamp)))
set_property('Map.3.Layer', xbmc.translatePath('special://profile/addon_data/%s/maps/tempmap/tempmap-%s.png' % (__addonid__, stamp)))
set_property('Map.4.Layer', xbmc.translatePath('special://profile/addon_data/%s/maps/windmap/windmap-%s.png' % (__addonid__, stamp)))
set_property('Map.5.Layer', xbmc.translatePath('special://profile/addon_data/%s/maps/pressuremap/pressuremap-%s.png' % (__addonid__, stamp)))
set_property('Map.1.Heading', xbmc.getLocalizedString(1448))
set_property('Map.2.Heading', xbmc.getLocalizedString(387))
set_property('Map.3.Heading', xbmc.getLocalizedString(1375))
set_property('Map.4.Heading', xbmc.getLocalizedString(383))
set_property('Map.5.Heading', xbmc.getLocalizedString(1376))
if 'F' in TEMPUNIT:
set_property('Map.1.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'precip-in.png')))
else:
set_property('Map.1.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'precip-mm.png')))
set_property('Map.2.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'clouds.png')))
if 'F' in TEMPUNIT:
set_property('Map.3.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'temp-f.png')))
else:
set_property('Map.3.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'temp-c.png')))
if SPEEDUNIT == 'mph':
set_property('Map.4.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'wind-mi.png')))
elif SPEEDUNIT == 'Beaufort':
set_property('Map.4.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'wind-bft.png')))
else:
set_property('Map.4.Legend' , xbmc.translatePath(os.path.join(__cwd__, 'resources', 'graphics', 'wind-kmh.png')))
set_property('Map.5.Legend' , '')
class get_tiles(threading.Thread):
def __init__(self, mapdir, mapfile, stamp, imgs, url):
self.mapdir = mapdir
self.mapfile = mapfile
self.stamp = stamp
self.imgs = imgs
self.url = url
threading.Thread.__init__(self)
def run(self):
self.fetch_tiles(self.imgs, self.mapdir)
self.merge_tiles()
def fetch_tiles(self, imgs, mapdir):
count = 1
failed = []
for img in imgs:
try:
query = self.url % (ZOOM, img[0], img[1])
req = urllib2.Request(query)
req.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
compr = gzip.GzipFile(fileobj=buf)
data = compr.read()
else:
data = response.read()
response.close()
log('image downloaded')
except:
data = ''
log('image download failed, retry')
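# remember the tile's original grid slot before queueing it for retry, so a
# re-downloaded tile is still written to the same numbered file below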
if len(img) == 2:
img.append(str(count))
failed.append(img)
if data != '':
if len(img) == 3:
num = img[2]
else:
num = str(count)
tilefile = xbmc.translatePath(os.path.join(mapdir, num + '.png')).decode("utf-8")
try:
tmpmap = open(tilefile, 'wb')
tmpmap.write(data)
tmpmap.close()
except:
log('failed to save image')
return
count += 1
if MONITOR.abortRequested():
return
if failed:
xbmc.sleep(10000)
self.fetch_tiles(failed, mapdir)
def merge_tiles(self):
out = Image.new("RGBA", (756, 756), None)
count = 1
imy = 0
for y in range(0,3):
imx = 0
for x in range(0,3):
tile_file = os.path.join(self.mapdir,str(count)+".png")
count += 1
try:
tile = Image.open(tile_file)
except:
return
out.paste( tile, (imx, imy), tile.convert('RGBA') )
imx += 256
imy += 256
if self.mapfile[0:6] == 'precip' or self.mapfile[0:6] == 'clouds':
enhancer = ImageEnhance.Brightness(out)
out = enhancer.enhance(0.3)
if not self.mapfile == 'streetmap.png':
out.save(os.path.join(self.mapdir,self.mapfile % str(self.stamp)))
else:
out.save(os.path.join(self.mapdir,self.mapfile))
class MyMonitor(xbmc.Monitor):
def __init__(self, *args, **kwargs):
xbmc.Monitor.__init__(self)
MONITOR = MyMonitor()
if ( __name__ == "__main__" ):
Main()
|
|
"""
kombu.transport.mongodb
=======================
MongoDB transport.
:copyright: (c) 2010 - 2012 by Flavio Percoco Premoli.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import pymongo
from pymongo import errors
from anyjson import loads, dumps
from pymongo.connection import Connection
from kombu.exceptions import StdConnectionError, StdChannelError
from kombu.five import Empty
from . import virtual
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 27017
__author__ = """\
Flavio [FlaPer87] Percoco Premoli <flaper87@flaper87.org>;\
Scott Lyons <scottalyons@gmail.com>;\
"""
class Channel(virtual.Channel):
_client = None
supports_fanout = True
_fanout_queues = {}
def __init__(self, *vargs, **kwargs):
super_ = super(Channel, self)
super_.__init__(*vargs, **kwargs)
self._queue_cursors = {}
self._queue_readcounts = {}
def _new_queue(self, queue, **kwargs):
pass
def _get(self, queue):
try:
if queue in self._fanout_queues:
msg = next(self._queue_cursors[queue])
self._queue_readcounts[queue] += 1
return loads(msg['payload'])
else:
msg = self.client.command(
'findandmodify', 'messages',
query={'queue': queue},
sort={'_id': pymongo.ASCENDING}, remove=True,
)
except errors.OperationFailure as exc:
if 'No matching object found' in exc.args[0]:
raise Empty()
raise
except StopIteration:
raise Empty()
# as of mongo 2.0 empty results won't raise an error
if msg['value'] is None:
raise Empty()
return loads(msg['value']['payload'])
def _size(self, queue):
if queue in self._fanout_queues:
return (self._queue_cursors[queue].count() -
self._queue_readcounts[queue])
return self.client.messages.find({'queue': queue}).count()
def _put(self, queue, message, **kwargs):
self.client.messages.insert({'payload': dumps(message),
'queue': queue})
def _purge(self, queue):
size = self._size(queue)
if queue in self._fanout_queues:
cursor = self._queue_cursors[queue]
cursor.rewind()
self._queue_cursors[queue] = cursor.skip(cursor.count())
else:
self.client.messages.remove({'queue': queue})
return size
def close(self):
super(Channel, self).close()
if self._client:
self._client.connection.end_request()
def _open(self):
"""
See mongodb uri documentation:
http://www.mongodb.org/display/DOCS/Connections
"""
conninfo = self.connection.client
dbname = None
hostname = None
if not conninfo.hostname:
conninfo.hostname = DEFAULT_HOST
for part in conninfo.hostname.split('/'):
if not hostname:
hostname = 'mongodb://' + part
continue
dbname = part
if '?' in part:
# In case someone is passing options
# to the mongodb connection. Right now
# it is not permitted by kombu
dbname, options = part.split('?')
hostname += '/?' + options
hostname = "%s/%s" % (
hostname, dbname in [None, "/"] and "admin" or dbname,
)
if not dbname or dbname == "/":
dbname = "kombu_default"
# At this point we expect the hostname to be something like
# (considering replica set form too):
#
# mongodb://[username:password@]host1[:port1][,host2[:port2],
# ...[,hostN[:portN]]][/[?options]]
mongoconn = Connection(host=hostname, ssl=conninfo.ssl)
version = mongoconn.server_info()['version']
if tuple(map(int, version.split('.')[:2])) < (1, 3):
raise NotImplementedError(
'Kombu requires MongoDB version 1.3+ (server is {0})'.format(
version))
database = getattr(mongoconn, dbname)
# This is done by the connection uri
# if conninfo.userid:
# database.authenticate(conninfo.userid, conninfo.password)
self.db = database
col = database.messages
col.ensure_index([('queue', 1), ('_id', 1)], background=True)
if 'messages.broadcast' not in database.collection_names():
capsize = conninfo.transport_options.get(
'capped_queue_size') or 100000
database.create_collection('messages.broadcast',
size=capsize, capped=True)
self.bcast = getattr(database, 'messages.broadcast')
self.bcast.ensure_index([('queue', 1)])
self.routing = getattr(database, 'messages.routing')
self.routing.ensure_index([('queue', 1), ('exchange', 1)])
return database
#TODO: Store a more complete exchange metatable in the routing collection
def get_table(self, exchange):
"""Get table of bindings for ``exchange``."""
localRoutes = frozenset(self.state.exchanges[exchange]['table'])
brokerRoutes = self.client.messages.routing.find(
{'exchange': exchange}
)
return localRoutes | frozenset((r['routing_key'],
r['pattern'],
r['queue']) for r in brokerRoutes)
def _put_fanout(self, exchange, message, **kwargs):
"""Deliver fanout message."""
self.client.messages.broadcast.insert({'payload': dumps(message),
'queue': exchange})
def _queue_bind(self, exchange, routing_key, pattern, queue):
if self.typeof(exchange).type == 'fanout':
cursor = self.bcast.find(query={'queue': exchange},
sort=[('$natural', 1)], tailable=True)
# Fast forward the cursor past old events
self._queue_cursors[queue] = cursor.skip(cursor.count())
self._queue_readcounts[queue] = cursor.count()
self._fanout_queues[queue] = exchange
meta = {'exchange': exchange,
'queue': queue,
'routing_key': routing_key,
'pattern': pattern}
self.client.messages.routing.update(meta, meta, upsert=True)
def queue_delete(self, queue, **kwargs):
self.routing.remove({'queue': queue})
super(Channel, self).queue_delete(queue, **kwargs)
if queue in self._fanout_queues:
self._queue_cursors[queue].close()
self._queue_cursors.pop(queue, None)
self._fanout_queues.pop(queue, None)
@property
def client(self):
if self._client is None:
self._client = self._open()
return self._client
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = DEFAULT_PORT
connection_errors = (StdConnectionError, errors.ConnectionFailure)
channel_errors = (StdChannelError,
errors.ConnectionFailure,
errors.OperationFailure)
driver_type = 'mongodb'
driver_name = 'pymongo'
def driver_version(self):
return pymongo.version
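# A minimal usage sketch (illustrative only, not part of this module): it
# assumes a MongoDB server on localhost and exercises this transport through
# kombu's public Connection/SimpleQueue API.
if __name__ == '__main__':
    from kombu import Connection
    with Connection('mongodb://localhost:27017/kombu_default') as conn:
        queue = conn.SimpleQueue('example_queue')
        queue.put({'hello': 'world'})  # stored in the 'messages' collection
        message = queue.get(block=True, timeout=5)
        print(message.payload)
        message.ack()
        queue.close()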
|
|
#!/usr/bin/python
import os, string, sys, time, re, math, fileinput, glob, shutil
import threading, platform, codecs, commands
from os.path import expanduser
from time import gmtime, strftime
# * Description: clone and update git repositories
# my-git.py project_flag branch_name archive_flag
# my-git.py apk master
# Copyright (C) 2017 Roy Chen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# increment this whenever we make important changes to this script
VERSION = (0, 1)
#--------------------------------------------------------------------------------------------------
g_clone_from_home_mirror = "0"
g_have_archive = "0"
g_rm_bin_file = "0"
g_archive_ui = "0"
g_apk_repo = "0"
g_thread_number = "1"
#--------------------------------------------------------------------------------------------------
def get_my_home_dir() :
ret = os.path.expanduser('~')
return my_abspath(ret) + "/"
def my_abspath(path) :
if ( 'Windows' in platform.system() ):
ret = os.path.abspath(path);
else :
ret = os.path.abspath(path);
return ret
def get_git_home_dir(host) :
if ( 'Windows' in platform.system() ):
ret = str("d:/" + "w/mygit/" + host)
else :
ret = get_my_home_dir() + "/w/mygit/" + host
ret = my_abspath(ret)
return ret
g_archive_time = "wonnow"
def get_g_archive_time() :
global g_archive_time
if ( g_archive_time == "wonnow" ):
g_archive_time = strftime("%Y-%m-%d_%H-%M-%S", gmtime())
ret = g_archive_time
return ret
def get_archive_out(out_dir, which, branch) :
# ret = out_dir + "/archive/" + branch + "/" + get_g_archive_time() + "/" + which
# ret = out_dir + "/archive/" + branch + "/" + which + "/backup/"
ret = out_dir + "/archive/" + which + "/backup/"
ret = my_abspath(ret)
return ret
def get_my_tmp_dir_out_out(out_dir, which, branch) :
ret = get_my_home_dir() + "/w/tmp/" + which + "/" + get_g_archive_time() + "/" + branch
ret = my_abspath(ret)
# print ret
return ret
def set_git_path() :
if ( 'Windows' in platform.system() ):
GIT_PATH = "C:\Program Files\Git\usr\bin"
if GIT_PATH not in sys.path:
sys.path.append(GIT_PATH)
print(sys.path)
#def now_time() :
# return str( time.strftime( '%Y%m%d-%H%M%S' , time.localtime() ) )
def get_scp() :
if ( 'Windows' in platform.system() ):
#ret = str('c:/Program Files/Git/usr/bin/scp')
#TODO
ret = str('c:/windows/scp')
else :
ret = "/usr/bin/scp"
return ret
def my_mk_dirs(out_dir) :
if os.path.exists(out_dir):
return
os.makedirs(out_dir)
def run_cmd(cmd):
print("run: " + cmd)
#os.system(cmd)
#os.popen(cmd)
(status, output) = commands.getstatusoutput(cmd)
print status, output
return output
#--------------------------------------------------------------------------------------------------
def start_thread(func):
#print("start_thread: " + arg1)
th = threading.Thread(target = func, )
th.start()
return th
def start_thread_arg5(func, arg1, arg2, arg3, arg4, arg5):
#print("start_thread: " + cmd)
th = threading.Thread(target = func, args = (arg1,arg2,arg3,arg4,arg5,))
th.start()
return th
#--------------------------------------------------------------------------------------------------
def clone_cmd(out_dir, which, host, port):
#git clone ssh://xxx@gerrit.xxx.com:29418/git/android/AMSS/build
#git clone file:///home/mirror/device/.git/
global g_clone_from_home_mirror
if (g_clone_from_home_mirror == "0"):
cmd = "git clone ssh://" + host + ":" + port + "/" + which + ".git" + " " + my_abspath(out_dir + "/" + which)
else :
cmd = "git clone file://" + "/home/mirror" + "/" + which + ".git" + " " + my_abspath(out_dir + "/" + which)
# print("clone_cmd: " + cmd)
return cmd
def scp_cmd(out_dir, which, host, port):
global g_clone_from_home_mirror
if (g_clone_from_home_mirror == "0"):
cmd = my_abspath(get_scp()) + " -P " + port + " -p " + host + ":/hooks/commit-msg " + my_abspath(out_dir + "/" + which + "/.git/hooks/")
else :
# no commit-msg hook to copy when cloning from the local mirror; use a no-op
cmd = "true"
# print("scp_cmd: " + cmd)
return cmd
def cmd_co_track(out_dir, which, branch):
cmd = "git checkout --track origin/" + branch
# print("cmd_co_track: " + cmd)
return cmd
def cmd_co(out_dir, which, branch):
cmd = "git checkout " + branch
# print("cmd_co: " + cmd)
return cmd
def cmd_re_up(out_dir, which, branch):
return "git remote update"
def cmd_clean(out_dir, which, branch):
return "git clean -df"
def cmd_reset_hard(out_dir, which, branch):
return "git reset --hard origin/" + branch
def cmd_archive(archive_out, which, branch):
#git archive --format=tar branch_xxx | gzip > /home/chenyong/tmp/brach_xxx.tar.gz
# archive_out = out_dir + "/archive/" + strftime("%Y-%m-%d_%H:%M:%S", gmtime()) + "/" + which
#TODO get name zz.tar.gz
# return "git archive --format=tar HEAD" + " | gzip > " + archive_out + "/" + "zz.tar.gz"
return "git archive --format=tar HEAD" + " | gzip > " + archive_out + "/" + get_g_archive_time() + ".tar.gz"
#def cmd_commit_dev(out_dir, which, branch):
# return 'git commit -asm "IB-02264-chenyong:sys sync by int"'
def cmd_push_branch(out_dir, which, branch):
return "git push --no-thin origin HEAD:" + branch
def run_git_cmd(out_dir, which_git, git_cmd):
pwd = os.getcwd()
repo_dir = my_abspath(out_dir + "/" + which_git)
# my_mk_dirs( repo_dir )
os.chdir( repo_dir )
# print("git_cmd: " + git_cmd)
ret = run_cmd(git_cmd)
os.chdir( my_abspath(pwd) )
return ret
#--------------------------------------------------------------------------------------------------
def run_my_cmd(out_dir, my_cmd):
pwd = os.getcwd()
my_dir = my_abspath(out_dir)
# print("my_dir: " + my_dir)
# my_mk_dirs( my_dir )
os.chdir( my_dir )
# print("my_cmd: " + my_cmd)
ret = run_cmd(my_cmd)
os.chdir( my_abspath(pwd) )
return ret
#--------------------------------------------------------------------------------------------------
def rm_bin_file( my_tmp_dir_out ):
global g_archive_ui
run_my_cmd( my_tmp_dir_out, "find -name aapt |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name vaultic_*xx* |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.mbn |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.mdt |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.bin |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.img |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.elf |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.zip |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.b0* |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.b1* |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.ogg |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.dat |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.gitignore |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.a |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.dll |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.dylib |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.ttf |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.wav |xargs rm -rf" )
# time.sleep( 1 )
run_my_cmd( my_tmp_dir_out, "find -name *.apk |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.theme |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.bin |xargs rm -rf" )
#time.sleep( 1 )
if (g_archive_ui == "0"):
run_my_cmd( my_tmp_dir_out, "find -name *.db |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.jar |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.so |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.bmp |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.png |xargs rm -rf" )
run_my_cmd( my_tmp_dir_out, "find -name *.jpg |xargs rm -rf" )
def run_func_archive(out_dir, which_git, branch):
global g_rm_bin_file
my_tmp_dir_out = get_my_tmp_dir_out_out(out_dir, which_git, branch)
if (os.path.exists(my_tmp_dir_out)):
shutil.rmtree(my_tmp_dir_out)
my_mk_dirs( my_tmp_dir_out )
print ("my_tmp_dir_out:" + my_tmp_dir_out)
# time.sleep( 1 )
archive_out_dir = get_archive_out(out_dir, which_git, branch)
if (os.path.exists(archive_out_dir)):
shutil.rmtree(archive_out_dir)
my_mk_dirs( archive_out_dir )
print ("archive_out_dir:" + archive_out_dir)
run_git_cmd( out_dir, which_git, cmd_archive(archive_out_dir, which_git, branch) )
if (g_rm_bin_file == "0"):
return
# run_my_cmd( my_tmp_dir_out, "rm -rf " + my_abspath(my_tmp_dir_out) + "/*" )
# time.sleep( 1 )
run_my_cmd( archive_out_dir, "tar -zxvf " + get_g_archive_time() + ".tar.gz -C " + my_tmp_dir_out )
# run_my_cmd( archive_out_dir, "rm -rf zz.tar.gz" )
time.sleep( 1 )
ret = run_cmd( "du -sh " + my_tmp_dir_out )
if (ret.startswith("4.0K")):
print ("no file in:" + my_tmp_dir_out)
shutil.rmtree(archive_out_dir)
return
if (g_rm_bin_file == "1"):
rm_bin_file( my_tmp_dir_out )
time.sleep( 1 )
run_my_cmd( archive_out_dir, "rm -rf " + get_g_archive_time() + ".tar.gz" )
ret = run_cmd( "du -sh " + my_tmp_dir_out )
if (ret.startswith("4.0K")):
print ("no file in:" + my_tmp_dir_out)
shutil.rmtree(archive_out_dir)
return
# run_my_cmd( my_tmp_dir_out, "tar -zcvf " + my_abspath(archive_out_dir) +"/a.tar.gz ./*" )
run_my_cmd( my_tmp_dir_out, "tar -zcvf " + my_abspath(archive_out_dir) + get_g_archive_time() + ".tar.gz ./*" )
return
#--------------------------------------------------------------------------------------------------
def my_func(out_dir, which_git, branch, host, port):
global g_have_archive
mygit = my_abspath(out_dir + "/" + which_git)
print("run cmd on repo: " + mygit)
if os.path.exists(mygit):
run_git_cmd( out_dir, which_git, cmd_re_up(out_dir, which_git, branch) )
run_git_cmd( out_dir, which_git, cmd_co_track(out_dir, which_git, branch) )
ret = run_git_cmd( out_dir, which_git, cmd_co(out_dir, which_git, branch) )
#error: pathspec 'masters' did not match any file(s) known to git.
if (ret.startswith('error: pathspec')):
print ("cant co branch:" + branch + ", skip it. ")
return ret
#run_git_cmd( out_dir, which_git, cmd_re_up(out_dir, which_git, branch) )
run_git_cmd( out_dir, which_git, cmd_clean(out_dir, which_git, branch) )
run_git_cmd( out_dir, which_git, cmd_reset_hard(out_dir, which_git, branch) )
if (g_have_archive == "1"):
run_func_archive(out_dir, which_git, branch)
else:
print("clone: " + mygit)
ret = run_cmd( clone_cmd(out_dir, which_git, host, port) )
run_cmd( scp_cmd(out_dir, which_git, host, port) )
run_git_cmd( out_dir, which_git, cmd_co_track(out_dir, which_git, branch) )
return ret
def git_op_by_thread(pool, host, port, out_dir, which_git, branch):
print( "get repo:" + which_git )
pool.append( start_thread_arg5(my_func, out_dir, which_git, branch, host, port) )
return
def is_skip_repo(repo_name):
list_skip_repo = ( \
'git/aosp',\
'vendor/qcom/proprietary/llvm-arm-toolchain-ship',\
)
for skip_repo in list_skip_repo:
if (repo_name.startswith(skip_repo)):
return True
return False
#--------------------------------------------------------------------------------------------------
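# Expected input for get_git_repo_by_thpool(): a text file produced by
# "gerrit ls-projects" (see main() below), with one repository path per line,
# e.g.:
#   git/android/AMSS/build
#   33repos/xx_InCallUI
# Repositories matching is_skip_repo() are ignored; every other repository is
# cloned/updated on its own worker thread, and the pool is joined whenever
# g_thread_number threads are in flight.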
def get_git_repo_by_thpool(host, port, branch_name, project_list):
global g_apk_repo
global g_thread_number
thpool = []
out_dir = get_git_home_dir(host)
print( "git_out_dir: " + out_dir )
my_mk_dirs( out_dir )
input_file = project_list
if os.access(input_file, os.R_OK) == 0:
print("no file: " + input_file + "\n")
return
fp = codecs.open(input_file, 'r', 'utf-8')
lines = fp.readlines()
fp.close()
thread_count = 0
thread_sum = int(g_thread_number)
#TODO
for line in lines:
line=line.strip()
print ("line:" + line)
if (is_skip_repo(line)):
print ("skip_repo:" + line + "\n")
continue
#TODO
# if (line.find(branch_name) == "-1"):
# print ("skip_repo:" + line + ", for repo\n")
# continue
# for apk branch
if ("1" == g_apk_repo):
#33repos/xx_InCallUI
if (line.rfind("/") > 0):
branch_name = line[line.rfind("/")+1:len(line)];
else:
branch_name = line
print ("branch_name:" + branch_name)
branch_name = branch_name + "_Int"
print ("branch_name:" + branch_name)
git_op_by_thread(thpool, host, port, out_dir, line, branch_name)
#/TODO too many thread
thread_count += 1
if (thread_count >= thread_sum):
for th in thpool:
th.join()
print ("wait all thread join ...")
thread_count = 0
print ("thread_count:%d" % (thread_count) )
for th in thpool:
th.join()
return
#--------------------------------------------------------------------------------------------------
def main(argv):
global g_clone_from_home_mirror
global g_have_archive
global g_archive_ui
global g_thread_number
global g_apk_repo
repo_flag = argv[1]
# branch = "master"
branch_name = sys.argv[2]
if (len(sys.argv) > 3):
g_have_archive = sys.argv[3]
if (len(sys.argv) > 4):
g_archive_ui = sys.argv[4]
port = "294xx"
print("repo_flag:" + repo_flag)
print("branch_name:" + branch_name)
print ("g_have_archive:" + (g_have_archive))
print ("g_archive_ui:" + (g_archive_ui))
# android apk
if (repo_flag == "apk"):
g_apk_repo = "1"
host = "1x.1x.1x.1x"
elif (repo_flag == "local"):
g_clone_from_home_mirror = "1"
host = "127.0.0.1"
run_cmd("ssh -p " + port + " " + host +" gerrit ls-projects > ls."+ host )
get_git_repo_by_thpool(host, port, branch_name, "ls." + host)
return
if __name__ == '__main__':
main(sys.argv)
|
|
import unittest
import ctypes
import errno
import os
import tempfile
from pathlib import Path
from g1.files import xattrs
from g1.files import _xattrs
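# These tests exercise thin wrappers around the Linux extended-attribute
# syscalls: listxattr(2)/getxattr(2)/setxattr(2)/removexattr(2) operate on a
# path and follow symlinks, the l* variants operate on the symlink itself,
# and the f* variants operate on an open file descriptor.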
class XattrsTestBase(unittest.TestCase):
def setUp(self):
super().setUp()
self._tempfile = tempfile.NamedTemporaryFile() # pylint: disable=consider-using-with
self._temp_symlink = self._tempfile.name + '-symlink'
os.symlink(self._tempfile.name, self._temp_symlink)
self.temp_path = Path(self._tempfile.name)
self.temp_path_str = self._tempfile.name
self.temp_path_bytes = self._tempfile.name.encode('ascii')
self.temp_symlink = self._temp_symlink.encode('ascii')
self.temp_fd = self._tempfile.fileno()
def tearDown(self):
self._tempfile.close()
os.unlink(self._temp_symlink)
super().tearDown()
def assert_listxattr(self, expect_regular):
for listxattr, path, expect in [
(_xattrs.listxattr, self.temp_path_bytes, expect_regular),
(_xattrs.listxattr, self.temp_symlink, expect_regular),
(_xattrs.llistxattr, self.temp_symlink, b''),
(_xattrs.flistxattr, self.temp_fd, expect_regular),
]:
with self.subTest((listxattr, path, expect)):
buffer = ctypes.create_string_buffer(len(expect) + 10)
self.assertEqual(
listxattr(path, buffer, len(buffer)),
len(expect),
)
self.assertEqual(buffer.raw[:len(expect)], expect)
expect_bytes = expect_regular.split(b'\x00')
expect_str = [x.decode('utf-8') for x in expect_bytes]
for path in [
self.temp_path,
self.temp_path_bytes,
self.temp_path_str,
self.temp_fd,
]:
with self.subTest(path):
self.assertEqual(
xattrs.listxattr(path, encoding=None), expect_bytes
)
self.assertEqual(xattrs.listxattr(path), expect_str)
def assert_getxattr(self, name_bytes, expect_regular):
for getxattr, path, expect in [
(_xattrs.getxattr, self.temp_path_bytes, expect_regular),
(_xattrs.getxattr, self.temp_symlink, expect_regular),
(_xattrs.lgetxattr, self.temp_symlink, b''),
(_xattrs.fgetxattr, self.temp_fd, expect_regular),
]:
with self.subTest((getxattr, path, expect)):
buffer = ctypes.create_string_buffer(len(expect) + 10)
if expect:
self.assertEqual(
getxattr(path, name_bytes, buffer, len(buffer)),
len(expect),
)
self.assertEqual(buffer.raw[:len(expect)], expect)
else:
with self.assertRaises(OSError) as cm:
getxattr(path, name_bytes, buffer, len(buffer))
self.assertEqual(cm.exception.args[0], errno.ENODATA)
for path in [
self.temp_path,
self.temp_path_bytes,
self.temp_path_str,
self.temp_fd,
]:
for name in [
name_bytes,
name_bytes.decode('ascii'),
]:
with self.subTest((path, name)):
attr = xattrs.getxattr(path, name)
if expect_regular:
self.assertEqual(attr, expect_regular)
else:
self.assertIsNone(attr)
class XattrsLowLevelTest(XattrsTestBase):
def test_xattr_path(self):
self.do_test_xattr(
_xattrs.setxattr, _xattrs.removexattr, self.temp_path_bytes
)
def test_xattr_symlink(self):
self.do_test_xattr(
_xattrs.setxattr, _xattrs.removexattr, self.temp_symlink
)
def test_fxattr(self):
self.do_test_xattr(
_xattrs.fsetxattr, _xattrs.fremovexattr, self.temp_fd
)
def do_test_xattr(self, setxattr, removexattr, path):
self.assert_listxattr(b'')
self.assert_getxattr(b'user.foo.bar', b'')
self.assert_getxattr(b'user.spam', b'')
self.assertEqual(setxattr(path, b'user.foo.bar', b'x', 1, 0), 0)
self.assert_listxattr(b'user.foo.bar\x00')
self.assert_getxattr(b'user.foo.bar', b'x')
self.assert_getxattr(b'user.spam', b'')
self.assertEqual(setxattr(path, b'user.spam', b'egg', 3, 0), 0)
self.assert_listxattr(b'user.foo.bar\x00user.spam\x00')
self.assert_getxattr(b'user.foo.bar', b'x')
self.assert_getxattr(b'user.spam', b'egg')
self.assertEqual(removexattr(path, b'user.foo.bar'), 0)
self.assert_listxattr(b'user.spam\x00')
self.assert_getxattr(b'user.foo.bar', b'')
self.assert_getxattr(b'user.spam', b'egg')
def test_lxattr(self):
with self.assertRaises(OSError) as cm:
_xattrs.lsetxattr(self.temp_symlink, b'user.foo.bar', b'x', 1, 0)
self.assertEqual(cm.exception.args[0], errno.EPERM)
def test_setxattr_flags(self):
name = b'user.foo.bar'
with self.assertRaises(OSError) as cm:
_xattrs.setxattr(
self.temp_path_bytes, name, b'x', 1, _xattrs.XATTR_REPLACE
)
self.assertEqual(cm.exception.args[0], errno.ENODATA)
_xattrs.setxattr(self.temp_path_bytes, name, b'x', 1, 0)
with self.assertRaises(OSError) as cm:
_xattrs.setxattr(
self.temp_path_bytes, name, b'x', 1, _xattrs.XATTR_CREATE
)
self.assertEqual(cm.exception.args[0], errno.EEXIST)
def test_erange_error(self):
name = b'user.foo.bar'
self.assertEqual(
_xattrs.setxattr(self.temp_path_bytes, name, b'xyz', 3, 0), 0
)
buffer = ctypes.create_string_buffer(1)
def do_test_erange_error(func, *args):
with self.assertRaises(OSError) as cm:
func(*args, buffer, len(buffer))
self.assertEqual(cm.exception.args[0], errno.ERANGE)
do_test_erange_error(_xattrs.listxattr, self.temp_path_bytes)
do_test_erange_error(_xattrs.flistxattr, self.temp_fd)
do_test_erange_error(_xattrs.getxattr, self.temp_path_bytes, name)
do_test_erange_error(_xattrs.fgetxattr, self.temp_fd, name)
class XattrsTest(XattrsTestBase):
def test_xattr(self):
name_str = 'user.foo.bar'
name_bytes = name_str.encode('ascii')
names = b'%s\x00' % name_bytes
def do_test_xattr(path, name):
self.assert_listxattr(b'')
self.assert_getxattr(name_bytes, b'')
xattrs.setxattr(path, name, b'x')
self.assert_listxattr(names)
self.assert_getxattr(name_bytes, b'x')
xattrs.removexattr(path, name)
self.assert_listxattr(b'')
self.assert_getxattr(name_bytes, b'')
for name in [name_str, name_bytes]:
do_test_xattr(self.temp_path, name)
do_test_xattr(self.temp_path_bytes, name)
do_test_xattr(self.temp_path_str, name)
do_test_xattr(self.temp_fd, name)
def test_setxattr_flags(self):
name = 'user.foo.bar'
with self.assertRaises(OSError) as cm:
xattrs.setxattr(self.temp_path, name, b'x', xattrs.XATTR_REPLACE)
self.assertEqual(cm.exception.args[0], errno.ENODATA)
xattrs.setxattr(self.temp_path, name, b'x', 0)
with self.assertRaises(OSError) as cm:
xattrs.setxattr(self.temp_path, name, b'x', xattrs.XATTR_CREATE)
self.assertEqual(cm.exception.args[0], errno.EEXIST)
def test_read_bytes(self):
xattrs.setxattr(self.temp_path, 'user.x', b'hello world', 0)
with self.assertRaisesRegex(
ValueError, r'size of listxattr exceeds 4'
):
xattrs._read_bytes(
'listxattr',
_xattrs.listxattr,
(self.temp_path_bytes, ),
buffer_size=2,
buffer_size_limit=4,
)
with self.assertRaisesRegex(ValueError, r'size of user.x exceeds 8'):
xattrs._read_bytes(
'user.x',
_xattrs.getxattr,
(self.temp_path_bytes, b'user.x'),
buffer_size=2,
buffer_size_limit=8,
)
if __name__ == '__main__':
unittest.main()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for dealing with circular statistics, for
instance, mean, variance, standard deviation, correlation coefficient, and so
on. This module also cover tests of uniformity, e.g., the Rayleigh and V tests.
The Maximum Likelihood Estimator for the Von Mises distribution along with the
Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations
are based on reference [1]_, which is also the basis for the R package
'CircStats' [2]_.
"""
import numpy as np
from astropy.units import Quantity
__all__ = ['circmean', 'circvar', 'circmoment', 'circcorrcoef', 'rayleightest',
'vtest', 'vonmisesmle']
__doctest_requires__ = {'vtest': ['scipy']}
def _components(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized rectangular components
# of the circular data.
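# With weights w_i the generalized components are
#   C = sum_i w_i * cos(p * (data_i - phi)) / sum_i w_i
#   S = sum_i w_i * sin(p * (data_i - phi)) / sum_i w_i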
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
C = np.sum(weights * np.cos(p * (data - phi)), axis)/np.sum(weights, axis)
S = np.sum(weights * np.sin(p * (data - phi)), axis)/np.sum(weights, axis)
return C, S
def _angle(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample mean angle
C, S = _components(data, p, phi, axis, weights)
# theta will be an angle in the interval [-np.pi, np.pi)
# [-180, 180)*u.deg in case data is a Quantity
theta = np.arctan2(S, C)
if isinstance(data, Quantity):
theta = theta.to(data.unit)
return theta
def _length(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample length
C, S = _components(data, p, phi, axis, weights)
return np.hypot(S, C)
def circmean(data, axis=None, weights=None):
""" Computes the circular mean angle of an array of circular data.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : numpy.ndarray or Quantity
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
return _angle(data, 1, 0.0, axis, weights)
def circvar(data, axis=None, weights=None):
""" Computes the circular variance of an array of circular data.
There are some concepts for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : numpy.ndarray or dimensionless Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : numpy.ndarray or dimensionless Quantity
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
The definition used here differs from the one in scipy.stats.circvar.
Precisely, Scipy circvar uses an approximation based on the limit of small
angles which approaches the linear variance.
"""
return 1.0 - _length(data, 1, 0.0, axis, weights)
def circmoment(data, p=1.0, centered=False, axis=None, weights=None):
""" Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : Boolean, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : numpy.ndarray or Quantity
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if centered:
phi = circmean(data, axis, weights)
else:
phi = 0.0
return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis,
weights)
def circcorrcoef(alpha, beta, axis=None, weights_alpha=None,
weights_beta=None):
""" Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : numpy.ndarray or dimensionless Quantity
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if(np.size(alpha, axis) != np.size(beta, axis)):
raise ValueError("alpha and beta must be arrays of the same size")
mu_a = circmean(alpha, axis, weights_alpha)
mu_b = circmean(beta, axis, weights_beta)
sin_a = np.sin(alpha - mu_a)
sin_b = np.sin(beta - mu_b)
rho = np.sum(sin_a*sin_b)/np.sqrt(np.sum(sin_a*sin_a)*np.sum(sin_b*sin_b))
return rho
def rayleightest(data, axis=None, weights=None):
""" Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
designed for detecting an unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
Small p-values suggest rejecting the null hypothesis.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or dimensionless Quantity
p-value.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.211.4762>
"""
n = np.size(data, axis=axis)
Rbar = _length(data, 1, 0.0, axis, weights)
z = n*Rbar*Rbar
# see [3] and [4] for the formulae below
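# For n >= 50 the large-sample approximation p ~ exp(-z) is used directly;
# for smaller n a higher-order series correction in z is applied.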
tmp = 1.0
if(n < 50):
tmp = 1.0 + (2.0*z - z*z)/(4.0*n) - (24.0*z - 132.0*z**2.0 +
76.0*z**3.0 - 9.0*z**4.0)/(288.0 *
n * n)
p_value = np.exp(-z)*tmp
return p_value
def vtest(data, mu=0.0, axis=None, weights=None):
""" Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or Quantity, optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or dimensionless Quantity
p-value.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
"""
from scipy.stats import norm
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
n = np.size(data, axis=axis)
R0bar = np.sum(weights * np.cos(data - mu), axis)/np.sum(weights, axis)
z = np.sqrt(2.0 * n) * R0bar
pz = norm.cdf(z)
fz = norm.pdf(z)
# see reference [3]
p_value = 1 - pz + fz*((3*z - z**3)/(16.0*n) +
(15*z + 305*z**3 - 125*z**5 + 9*z**7)/(4608.0*n*n))
return p_value
def _A1inv(x):
    # Approximation for _A1inv(x) according to the R package 'CircStats'
# See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4)
if 0 <= x < 0.53:
return 2.0*x + x*x*x + (5.0*x**5)/6.0
elif x < 0.85:
return -0.4 + 1.39*x + 0.43/(1.0 - x)
else:
return 1.0/(x*x*x - 4.0*x*x + 3.0*x)
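# A small self-consistency sketch (added for illustration; it assumes scipy is
# available, as it already is for vtest above). _A1inv approximates the inverse
# of A1(kappa) = I1(kappa)/I0(kappa), the ratio of modified Bessel functions
# used in the von Mises MLE, so A1(_A1inv(x)) should come back close to x.
def _check_A1inv(x):
    from scipy.special import iv
    kappa = _A1inv(x)
    return iv(1, kappa) / iv(0, kappa)  # expected to be approximately x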
def vonmisesmle(data, axis=None):
""" Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the mle will be computed.
Returns
-------
mu : float or Quantity
the mean (aka location parameter).
kappa : float or dimensionless Quantity
the concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
mu = circmean(data, axis=None)
kappa = _A1inv(np.mean(np.cos(data - mu), axis))
return mu, kappa
|
|
"""Tests for the Entity Registry."""
from unittest.mock import patch
import pytest
from homeassistant.const import EVENT_HOMEASSISTANT_START, STATE_UNAVAILABLE
from homeassistant.core import CoreState, callback, valid_entity_id
from homeassistant.helpers import entity_registry
from tests.common import (
MockConfigEntry,
flush_store,
mock_device_registry,
mock_registry,
)
YAML__OPEN_PATH = "homeassistant.util.yaml.loader.open"
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, async_capture)
return events
async def test_get_or_create_returns_same_entry(hass, registry, update_events):
"""Make sure we do not duplicate entries."""
entry = registry.async_get_or_create("light", "hue", "1234")
entry2 = registry.async_get_or_create("light", "hue", "1234")
await hass.async_block_till_done()
assert len(registry.entities) == 1
assert entry is entry2
assert entry.entity_id == "light.hue_1234"
assert len(update_events) == 1
assert update_events[0]["action"] == "create"
assert update_events[0]["entity_id"] == entry.entity_id
def test_get_or_create_suggested_object_id(registry):
"""Test that suggested_object_id works."""
entry = registry.async_get_or_create(
"light", "hue", "1234", suggested_object_id="beer"
)
assert entry.entity_id == "light.beer"
def test_get_or_create_updates_data(registry):
"""Test that we update data in get_or_create."""
orig_config_entry = MockConfigEntry(domain="light")
orig_entry = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=orig_config_entry,
device_id="mock-dev-id",
capabilities={"max": 100},
supported_features=5,
device_class="mock-device-class",
disabled_by=entity_registry.DISABLED_HASS,
unit_of_measurement="initial-unit_of_measurement",
original_name="initial-original_name",
original_icon="initial-original_icon",
)
assert orig_entry.config_entry_id == orig_config_entry.entry_id
assert orig_entry.device_id == "mock-dev-id"
assert orig_entry.capabilities == {"max": 100}
assert orig_entry.supported_features == 5
assert orig_entry.device_class == "mock-device-class"
assert orig_entry.disabled_by == entity_registry.DISABLED_HASS
assert orig_entry.unit_of_measurement == "initial-unit_of_measurement"
assert orig_entry.original_name == "initial-original_name"
assert orig_entry.original_icon == "initial-original_icon"
new_config_entry = MockConfigEntry(domain="light")
new_entry = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=new_config_entry,
device_id="new-mock-dev-id",
capabilities={"new-max": 100},
supported_features=10,
device_class="new-mock-device-class",
disabled_by=entity_registry.DISABLED_USER,
unit_of_measurement="updated-unit_of_measurement",
original_name="updated-original_name",
original_icon="updated-original_icon",
)
assert new_entry.config_entry_id == new_config_entry.entry_id
assert new_entry.device_id == "new-mock-dev-id"
assert new_entry.capabilities == {"new-max": 100}
assert new_entry.supported_features == 10
assert new_entry.device_class == "new-mock-device-class"
assert new_entry.unit_of_measurement == "updated-unit_of_measurement"
assert new_entry.original_name == "updated-original_name"
assert new_entry.original_icon == "updated-original_icon"
# Should not be updated
assert new_entry.disabled_by == entity_registry.DISABLED_HASS
def test_get_or_create_suggested_object_id_conflict_register(registry):
"""Test that we don't generate an entity id that is already registered."""
entry = registry.async_get_or_create(
"light", "hue", "1234", suggested_object_id="beer"
)
entry2 = registry.async_get_or_create(
"light", "hue", "5678", suggested_object_id="beer"
)
assert entry.entity_id == "light.beer"
assert entry2.entity_id == "light.beer_2"
def test_get_or_create_suggested_object_id_conflict_existing(hass, registry):
"""Test that we don't generate an entity id that currently exists."""
hass.states.async_set("light.hue_1234", "on")
entry = registry.async_get_or_create("light", "hue", "1234")
assert entry.entity_id == "light.hue_1234_2"
def test_create_triggers_save(hass, registry):
"""Test that registering entry triggers a save."""
with patch.object(registry, "async_schedule_save") as mock_schedule_save:
registry.async_get_or_create("light", "hue", "1234")
assert len(mock_schedule_save.mock_calls) == 1
async def test_loading_saving_data(hass, registry):
"""Test that we load/save data correctly."""
mock_config = MockConfigEntry(domain="light")
orig_entry1 = registry.async_get_or_create("light", "hue", "1234")
orig_entry2 = registry.async_get_or_create(
"light",
"hue",
"5678",
device_id="mock-dev-id",
area_id="mock-area-id",
config_entry=mock_config,
capabilities={"max": 100},
supported_features=5,
device_class="mock-device-class",
disabled_by=entity_registry.DISABLED_HASS,
original_name="Original Name",
original_icon="hass:original-icon",
)
orig_entry2 = registry.async_update_entity(
orig_entry2.entity_id, name="User Name", icon="hass:user-icon"
)
assert len(registry.entities) == 2
# Now load written data in new registry
registry2 = entity_registry.EntityRegistry(hass)
await flush_store(registry._store)
await registry2.async_load()
# Ensure same order
assert list(registry.entities) == list(registry2.entities)
new_entry1 = registry.async_get_or_create("light", "hue", "1234")
new_entry2 = registry.async_get_or_create("light", "hue", "5678")
assert orig_entry1 == new_entry1
assert orig_entry2 == new_entry2
assert new_entry2.device_id == "mock-dev-id"
assert new_entry2.area_id == "mock-area-id"
assert new_entry2.disabled_by == entity_registry.DISABLED_HASS
assert new_entry2.capabilities == {"max": 100}
assert new_entry2.supported_features == 5
assert new_entry2.device_class == "mock-device-class"
assert new_entry2.name == "User Name"
assert new_entry2.icon == "hass:user-icon"
assert new_entry2.original_name == "Original Name"
assert new_entry2.original_icon == "hass:original-icon"
def test_generate_entity_considers_registered_entities(registry):
"""Test that we don't create entity id that are already registered."""
entry = registry.async_get_or_create("light", "hue", "1234")
assert entry.entity_id == "light.hue_1234"
assert registry.async_generate_entity_id("light", "hue_1234") == "light.hue_1234_2"
def test_generate_entity_considers_existing_entities(hass, registry):
"""Test that we don't create entity id that currently exists."""
hass.states.async_set("light.kitchen", "on")
assert registry.async_generate_entity_id("light", "kitchen") == "light.kitchen_2"
def test_is_registered(registry):
"""Test that is_registered works."""
entry = registry.async_get_or_create("light", "hue", "1234")
assert registry.async_is_registered(entry.entity_id)
assert not registry.async_is_registered("light.non_existing")
@pytest.mark.parametrize("load_registries", [False])
async def test_loading_extra_values(hass, hass_storage):
"""Test we load extra data from the registry."""
hass_storage[entity_registry.STORAGE_KEY] = {
"version": entity_registry.STORAGE_VERSION,
"data": {
"entities": [
{
"entity_id": "test.named",
"platform": "super_platform",
"unique_id": "with-name",
"name": "registry override",
},
{
"entity_id": "test.no_name",
"platform": "super_platform",
"unique_id": "without-name",
},
{
"entity_id": "test.disabled_user",
"platform": "super_platform",
"unique_id": "disabled-user",
"disabled_by": "user",
},
{
"entity_id": "test.disabled_hass",
"platform": "super_platform",
"unique_id": "disabled-hass",
"disabled_by": "hass",
},
{
"entity_id": "test.invalid__entity",
"platform": "super_platform",
"unique_id": "invalid-hass",
"disabled_by": "hass",
},
]
},
}
await entity_registry.async_load(hass)
registry = entity_registry.async_get(hass)
assert len(registry.entities) == 4
entry_with_name = registry.async_get_or_create(
"test", "super_platform", "with-name"
)
entry_without_name = registry.async_get_or_create(
"test", "super_platform", "without-name"
)
assert entry_with_name.name == "registry override"
assert entry_without_name.name is None
assert not entry_with_name.disabled
entry_disabled_hass = registry.async_get_or_create(
"test", "super_platform", "disabled-hass"
)
entry_disabled_user = registry.async_get_or_create(
"test", "super_platform", "disabled-user"
)
assert entry_disabled_hass.disabled
assert entry_disabled_hass.disabled_by == entity_registry.DISABLED_HASS
assert entry_disabled_user.disabled
assert entry_disabled_user.disabled_by == entity_registry.DISABLED_USER
def test_async_get_entity_id(registry):
"""Test that entity_id is returned."""
entry = registry.async_get_or_create("light", "hue", "1234")
assert entry.entity_id == "light.hue_1234"
assert registry.async_get_entity_id("light", "hue", "1234") == "light.hue_1234"
assert registry.async_get_entity_id("light", "hue", "123") is None
async def test_updating_config_entry_id(hass, registry, update_events):
"""Test that we update config entry id in registry."""
mock_config_1 = MockConfigEntry(domain="light", entry_id="mock-id-1")
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry=mock_config_1
)
mock_config_2 = MockConfigEntry(domain="light", entry_id="mock-id-2")
entry2 = registry.async_get_or_create(
"light", "hue", "5678", config_entry=mock_config_2
)
assert entry.entity_id == entry2.entity_id
assert entry2.config_entry_id == "mock-id-2"
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["entity_id"] == entry.entity_id
assert update_events[1]["action"] == "update"
assert update_events[1]["entity_id"] == entry.entity_id
assert update_events[1]["changes"] == {"config_entry_id": "mock-id-1"}
async def test_removing_config_entry_id(hass, registry, update_events):
"""Test that we update config entry id in registry."""
mock_config = MockConfigEntry(domain="light", entry_id="mock-id-1")
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry=mock_config
)
assert entry.config_entry_id == "mock-id-1"
registry.async_clear_config_entry("mock-id-1")
assert not registry.entities
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["entity_id"] == entry.entity_id
assert update_events[1]["action"] == "remove"
assert update_events[1]["entity_id"] == entry.entity_id
async def test_removing_area_id(registry):
"""Make sure we can clear area id."""
entry = registry.async_get_or_create("light", "hue", "5678")
entry_w_area = registry.async_update_entity(entry.entity_id, area_id="12345A")
registry.async_clear_area_id("12345A")
entry_wo_area = registry.async_get(entry.entity_id)
assert not entry_wo_area.area_id
assert entry_w_area != entry_wo_area
@pytest.mark.parametrize("load_registries", [False])
async def test_migration(hass):
"""Test migration from old data to new."""
mock_config = MockConfigEntry(domain="test-platform", entry_id="test-config-id")
old_conf = {
"light.kitchen": {
"config_entry_id": "test-config-id",
"unique_id": "test-unique",
"platform": "test-platform",
"name": "Test Name",
"disabled_by": "hass",
}
}
with patch("os.path.isfile", return_value=True), patch("os.remove"), patch(
"homeassistant.helpers.entity_registry.load_yaml", return_value=old_conf
):
await entity_registry.async_load(hass)
registry = entity_registry.async_get(hass)
assert registry.async_is_registered("light.kitchen")
entry = registry.async_get_or_create(
domain="light",
platform="test-platform",
unique_id="test-unique",
config_entry=mock_config,
)
assert entry.name == "Test Name"
assert entry.disabled_by == "hass"
assert entry.config_entry_id == "test-config-id"
async def test_loading_invalid_entity_id(hass, hass_storage):
"""Test we autofix invalid entity IDs."""
hass_storage[entity_registry.STORAGE_KEY] = {
"version": entity_registry.STORAGE_VERSION,
"data": {
"entities": [
{
"entity_id": "test.invalid__middle",
"platform": "super_platform",
"unique_id": "id-invalid-middle",
"name": "registry override",
},
{
"entity_id": "test.invalid_end_",
"platform": "super_platform",
"unique_id": "id-invalid-end",
},
{
"entity_id": "test._invalid_start",
"platform": "super_platform",
"unique_id": "id-invalid-start",
},
]
},
}
registry = await entity_registry.async_get_registry(hass)
entity_invalid_middle = registry.async_get_or_create(
"test", "super_platform", "id-invalid-middle"
)
assert valid_entity_id(entity_invalid_middle.entity_id)
entity_invalid_end = registry.async_get_or_create(
"test", "super_platform", "id-invalid-end"
)
assert valid_entity_id(entity_invalid_end.entity_id)
entity_invalid_start = registry.async_get_or_create(
"test", "super_platform", "id-invalid-start"
)
assert valid_entity_id(entity_invalid_start.entity_id)
async def test_update_entity_unique_id(registry):
"""Test entity's unique_id is updated."""
mock_config = MockConfigEntry(domain="light", entry_id="mock-id-1")
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry=mock_config
)
assert registry.async_get_entity_id("light", "hue", "5678") == entry.entity_id
new_unique_id = "1234"
with patch.object(registry, "async_schedule_save") as mock_schedule_save:
updated_entry = registry.async_update_entity(
entry.entity_id, new_unique_id=new_unique_id
)
assert updated_entry != entry
assert updated_entry.unique_id == new_unique_id
assert mock_schedule_save.call_count == 1
assert registry.async_get_entity_id("light", "hue", "5678") is None
assert registry.async_get_entity_id("light", "hue", "1234") == entry.entity_id
async def test_update_entity_unique_id_conflict(registry):
"""Test migration raises when unique_id already in use."""
mock_config = MockConfigEntry(domain="light", entry_id="mock-id-1")
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry=mock_config
)
entry2 = registry.async_get_or_create(
"light", "hue", "1234", config_entry=mock_config
)
with patch.object(
registry, "async_schedule_save"
) as mock_schedule_save, pytest.raises(ValueError):
registry.async_update_entity(entry.entity_id, new_unique_id=entry2.unique_id)
assert mock_schedule_save.call_count == 0
assert registry.async_get_entity_id("light", "hue", "5678") == entry.entity_id
assert registry.async_get_entity_id("light", "hue", "1234") == entry2.entity_id
async def test_update_entity(registry):
"""Test updating entity."""
mock_config = MockConfigEntry(domain="light", entry_id="mock-id-1")
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry=mock_config
)
for attr_name, new_value in (
("name", "new name"),
("icon", "new icon"),
("disabled_by", entity_registry.DISABLED_USER),
):
changes = {attr_name: new_value}
updated_entry = registry.async_update_entity(entry.entity_id, **changes)
assert updated_entry != entry
assert getattr(updated_entry, attr_name) == new_value
assert getattr(updated_entry, attr_name) != getattr(entry, attr_name)
assert (
registry.async_get_entity_id("light", "hue", "5678")
== updated_entry.entity_id
)
entry = updated_entry
async def test_disabled_by(registry):
"""Test that we can disable an entry when we create it."""
entry = registry.async_get_or_create("light", "hue", "5678", disabled_by="hass")
assert entry.disabled_by == "hass"
entry = registry.async_get_or_create(
"light", "hue", "5678", disabled_by="integration"
)
assert entry.disabled_by == "hass"
entry2 = registry.async_get_or_create("light", "hue", "1234")
assert entry2.disabled_by is None
async def test_disabled_by_system_options(registry):
"""Test system options setting disabled_by."""
mock_config = MockConfigEntry(
domain="light",
entry_id="mock-id-1",
system_options={"disable_new_entities": True},
)
entry = registry.async_get_or_create(
"light", "hue", "AAAA", config_entry=mock_config
)
assert entry.disabled_by == "integration"
entry2 = registry.async_get_or_create(
"light", "hue", "BBBB", config_entry=mock_config, disabled_by="user"
)
assert entry2.disabled_by == "user"
async def test_restore_states(hass):
"""Test restoring states."""
hass.state = CoreState.not_running
registry = await entity_registry.async_get_registry(hass)
registry.async_get_or_create(
"light",
"hue",
"1234",
suggested_object_id="simple",
)
# Should not be created
registry.async_get_or_create(
"light",
"hue",
"5678",
suggested_object_id="disabled",
disabled_by=entity_registry.DISABLED_HASS,
)
registry.async_get_or_create(
"light",
"hue",
"9012",
suggested_object_id="all_info_set",
capabilities={"max": 100},
supported_features=5,
device_class="mock-device-class",
original_name="Mock Original Name",
original_icon="hass:original-icon",
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
await hass.async_block_till_done()
simple = hass.states.get("light.simple")
assert simple is not None
assert simple.state == STATE_UNAVAILABLE
assert simple.attributes == {"restored": True, "supported_features": 0}
disabled = hass.states.get("light.disabled")
assert disabled is None
all_info_set = hass.states.get("light.all_info_set")
assert all_info_set is not None
assert all_info_set.state == STATE_UNAVAILABLE
assert all_info_set.attributes == {
"max": 100,
"supported_features": 5,
"device_class": "mock-device-class",
"restored": True,
"friendly_name": "Mock Original Name",
"icon": "hass:original-icon",
}
registry.async_remove("light.disabled")
registry.async_remove("light.simple")
registry.async_remove("light.all_info_set")
await hass.async_block_till_done()
assert hass.states.get("light.simple") is None
assert hass.states.get("light.disabled") is None
assert hass.states.get("light.all_info_set") is None
async def test_async_get_device_class_lookup(hass):
"""Test registry device class lookup."""
hass.state = CoreState.not_running
ent_reg = await entity_registry.async_get_registry(hass)
ent_reg.async_get_or_create(
"binary_sensor",
"light",
"battery_charging",
device_id="light_device_entry_id",
device_class="battery_charging",
)
ent_reg.async_get_or_create(
"sensor",
"light",
"battery",
device_id="light_device_entry_id",
device_class="battery",
)
ent_reg.async_get_or_create(
"light", "light", "demo", device_id="light_device_entry_id"
)
ent_reg.async_get_or_create(
"binary_sensor",
"vacuum",
"battery_charging",
device_id="vacuum_device_entry_id",
device_class="battery_charging",
)
ent_reg.async_get_or_create(
"sensor",
"vacuum",
"battery",
device_id="vacuum_device_entry_id",
device_class="battery",
)
ent_reg.async_get_or_create(
"vacuum", "vacuum", "demo", device_id="vacuum_device_entry_id"
)
ent_reg.async_get_or_create(
"binary_sensor",
"remote",
"battery_charging",
device_id="remote_device_entry_id",
device_class="battery_charging",
)
ent_reg.async_get_or_create(
"remote", "remote", "demo", device_id="remote_device_entry_id"
)
device_lookup = ent_reg.async_get_device_class_lookup(
{("binary_sensor", "battery_charging"), ("sensor", "battery")}
)
assert device_lookup == {
"remote_device_entry_id": {
(
"binary_sensor",
"battery_charging",
): "binary_sensor.remote_battery_charging"
},
"light_device_entry_id": {
(
"binary_sensor",
"battery_charging",
): "binary_sensor.light_battery_charging",
("sensor", "battery"): "sensor.light_battery",
},
"vacuum_device_entry_id": {
(
"binary_sensor",
"battery_charging",
): "binary_sensor.vacuum_battery_charging",
("sensor", "battery"): "sensor.vacuum_battery",
},
}
async def test_remove_device_removes_entities(hass, registry):
"""Test that we remove entities tied to a device."""
device_registry = mock_device_registry(hass)
config_entry = MockConfigEntry(domain="light")
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={("mac", "12:34:56:AB:CD:EF")},
)
entry = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
assert registry.async_is_registered(entry.entity_id)
device_registry.async_remove_device(device_entry.id)
await hass.async_block_till_done()
assert not registry.async_is_registered(entry.entity_id)
async def test_update_device_race(hass, registry):
"""Test race when a device is created, updated and removed."""
device_registry = mock_device_registry(hass)
config_entry = MockConfigEntry(domain="light")
# Create device
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={("mac", "12:34:56:AB:CD:EF")},
)
# Update it
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={("bridgeid", "0123")},
connections={("mac", "12:34:56:AB:CD:EF")},
)
# Add entity to the device
entry = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
assert registry.async_is_registered(entry.entity_id)
device_registry.async_remove_device(device_entry.id)
await hass.async_block_till_done()
assert not registry.async_is_registered(entry.entity_id)
async def test_disable_device_disables_entities(hass, registry):
"""Test that we disable entities tied to a device."""
device_registry = mock_device_registry(hass)
config_entry = MockConfigEntry(domain="light")
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={("mac", "12:34:56:AB:CD:EF")},
)
entry1 = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
entry2 = registry.async_get_or_create(
"light",
"hue",
"ABCD",
config_entry=config_entry,
device_id=device_entry.id,
disabled_by="user",
)
entry3 = registry.async_get_or_create(
"light",
"hue",
"EFGH",
config_entry=config_entry,
device_id=device_entry.id,
disabled_by="config_entry",
)
assert not entry1.disabled
assert entry2.disabled
assert entry3.disabled
device_registry.async_update_device(device_entry.id, disabled_by="user")
await hass.async_block_till_done()
entry1 = registry.async_get(entry1.entity_id)
assert entry1.disabled
assert entry1.disabled_by == "device"
entry2 = registry.async_get(entry2.entity_id)
assert entry2.disabled
assert entry2.disabled_by == "user"
entry3 = registry.async_get(entry3.entity_id)
assert entry3.disabled
assert entry3.disabled_by == "config_entry"
device_registry.async_update_device(device_entry.id, disabled_by=None)
await hass.async_block_till_done()
entry1 = registry.async_get(entry1.entity_id)
assert not entry1.disabled
entry2 = registry.async_get(entry2.entity_id)
assert entry2.disabled
assert entry2.disabled_by == "user"
entry3 = registry.async_get(entry3.entity_id)
assert entry3.disabled
assert entry3.disabled_by == "config_entry"
async def test_disable_config_entry_disables_entities(hass, registry):
"""Test that we disable entities tied to a config entry."""
device_registry = mock_device_registry(hass)
config_entry = MockConfigEntry(domain="light")
config_entry.add_to_hass(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={("mac", "12:34:56:AB:CD:EF")},
)
entry1 = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
entry2 = registry.async_get_or_create(
"light",
"hue",
"ABCD",
config_entry=config_entry,
device_id=device_entry.id,
disabled_by="user",
)
entry3 = registry.async_get_or_create(
"light",
"hue",
"EFGH",
config_entry=config_entry,
device_id=device_entry.id,
disabled_by="device",
)
assert not entry1.disabled
assert entry2.disabled
assert entry3.disabled
await hass.config_entries.async_set_disabled_by(config_entry.entry_id, "user")
await hass.async_block_till_done()
entry1 = registry.async_get(entry1.entity_id)
assert entry1.disabled
assert entry1.disabled_by == "config_entry"
entry2 = registry.async_get(entry2.entity_id)
assert entry2.disabled
assert entry2.disabled_by == "user"
entry3 = registry.async_get(entry3.entity_id)
assert entry3.disabled
assert entry3.disabled_by == "device"
await hass.config_entries.async_set_disabled_by(config_entry.entry_id, None)
await hass.async_block_till_done()
entry1 = registry.async_get(entry1.entity_id)
assert not entry1.disabled
entry2 = registry.async_get(entry2.entity_id)
assert entry2.disabled
assert entry2.disabled_by == "user"
# The device was re-enabled, so entity disabled by the device will be re-enabled too
entry3 = registry.async_get(entry3.entity_id)
assert not entry3.disabled_by
async def test_disabled_entities_excluded_from_entity_list(hass, registry):
"""Test that disabled entities are excluded from async_entries_for_device."""
device_registry = mock_device_registry(hass)
config_entry = MockConfigEntry(domain="light")
device_entry = device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={("mac", "12:34:56:AB:CD:EF")},
)
entry1 = registry.async_get_or_create(
"light",
"hue",
"5678",
config_entry=config_entry,
device_id=device_entry.id,
)
entry2 = registry.async_get_or_create(
"light",
"hue",
"ABCD",
config_entry=config_entry,
device_id=device_entry.id,
disabled_by="user",
)
entries = entity_registry.async_entries_for_device(registry, device_entry.id)
assert entries == [entry1]
entries = entity_registry.async_entries_for_device(
registry, device_entry.id, include_disabled_entities=True
)
assert entries == [entry1, entry2]
|
|
__author__ = 'Girish'
# Bridge Edges v4
#
# Find the bridge edges in a graph given the
# algorithm in lecture.
# Complete the intermediate steps
# - create_rooted_spanning_tree
# - post_order
# - number_of_descendants
# - lowest_post_order
# - highest_post_order
#
# And then combine them together in
# `bridge_edges`
# So far, we've represented graphs
# as a dictionary where G[n1][n2] == 1
# meant there was an edge between n1 and n2
#
# In order to represent a spanning tree
# we need to create two classes of edges
# we'll refer to them as "green" and "red"
# for the green and red edges as specified in lecture
#
# So, for example, the graph given in lecture
# G = {'a': {'c': 1, 'b': 1},
# 'b': {'a': 1, 'd': 1},
# 'c': {'a': 1, 'd': 1},
# 'd': {'c': 1, 'b': 1, 'e': 1},
# 'e': {'d': 1, 'g': 1, 'f': 1},
# 'f': {'e': 1, 'g': 1},
# 'g': {'e': 1, 'f': 1}
# }
# would be written as a spanning tree
# S = {'a': {'c': 'green', 'b': 'green'},
# 'b': {'a': 'green', 'd': 'red'},
# 'c': {'a': 'green', 'd': 'green'},
# 'd': {'c': 'green', 'b': 'red', 'e': 'green'},
# 'e': {'d': 'green', 'g': 'green', 'f': 'green'},
# 'f': {'e': 'green', 'g': 'red'},
# 'g': {'e': 'green', 'f': 'red'}
# }
#
def create_rooted_spanning_tree(G, root):
    # BFS from the root: edges that discover a new node become 'green' (tree)
    # edges, every other edge becomes 'red'.
    S = dict.fromkeys(G.keys())
    for i in S.keys():
        S[i] = {}
    queue = [root]
    visited = {root: 1}
    while queue:
        now = queue.pop(0)
        for neighbor in G[now]:
            if neighbor not in visited:
                visited[neighbor] = 1
                S[now][neighbor] = 'green'
                S[neighbor][now] = 'green'
                queue.append(neighbor)
            elif S[neighbor].get(now, "") != 'green':
                S[now][neighbor] = 'red'
                S[neighbor][now] = 'red'
    return S
# This is just one possible solution
# There are other ways to create a
# spanning tree, and the grader will
# accept any valid result
# feel free to edit the test to
# match the solution your program produces
def test_create_rooted_spanning_tree():
G = {'a': {'c': 1, 'b': 1},
'b': {'a': 1, 'd': 1},
'c': {'a': 1, 'd': 1},
'd': {'c': 1, 'b': 1, 'e': 1},
'e': {'d': 1, 'g': 1, 'f': 1},
'f': {'e': 1, 'g': 1},
'g': {'e': 1, 'f': 1}
}
S = create_rooted_spanning_tree(G, "a")
print(S)
assert S == {'a': {'c': 'green', 'b': 'green'},
'b': {'a': 'green', 'd': 'red'},
'c': {'a': 'green', 'd': 'green'},
'd': {'c': 'green', 'b': 'red', 'e': 'green'},
'e': {'d': 'green', 'g': 'green', 'f': 'green'},
'f': {'e': 'green', 'g': 'red'},
'g': {'e': 'green', 'f': 'red'}
}
###########
test_create_rooted_spanning_tree()
def post_order(S, root):
# return mapping between nodes of S and the post-order value
# of that node
pass
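# One possible sketch of how the stub above could be completed (illustrative,
# not the lecture's reference solution): number each node by a depth-first walk
# over green (tree) edges, assigning the count when its subtree is finished.
# The exact numbers depend on neighbor iteration order, so they may differ from
# the sample assertion in test_post_order below, as its comments already allow.
def post_order_sketch(S, root):
    po = {}
    counter = [0]
    def dfs(node, parent):
        for child in S[node]:
            if child != parent and S[node][child] == 'green':
                dfs(child, node)
        counter[0] += 1
        po[node] = counter[0]
    dfs(root, None)
    return po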
# This is just one possible solution
# There are other ways to create a
# spanning tree, and the grader will
# accept any valid result.
# feel free to edit the test to
# match the solution your program produces
def test_post_order():
S = {'a': {'c': 'green', 'b': 'green'},
'b': {'a': 'green', 'd': 'red'},
'c': {'a': 'green', 'd': 'green'},
'd': {'c': 'green', 'b': 'red', 'e': 'green'},
'e': {'d': 'green', 'g': 'green', 'f': 'green'},
'f': {'e': 'green', 'g': 'red'},
'g': {'e': 'green', 'f': 'red'}
}
po = post_order(S, 'a')
assert po == {'a':7, 'b':1, 'c':6, 'd':5, 'e':4, 'f':2, 'g':3}
##############
def number_of_descendants(S, root):
# return mapping between nodes of S and the number of descendants
# of that node
pass
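# One possible sketch for the stub above (illustrative): each node counts
# itself plus everything reachable below it through green (tree) edges.
def number_of_descendants_sketch(S, root):
    nd = {}
    def dfs(node, parent):
        nd[node] = 1
        for child in S[node]:
            if child != parent and S[node][child] == 'green':
                nd[node] += dfs(child, node)
        return nd[node]
    dfs(root, None)
    return nd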
def test_number_of_descendants():
S = {'a': {'c': 'green', 'b': 'green'},
'b': {'a': 'green', 'd': 'red'},
'c': {'a': 'green', 'd': 'green'},
'd': {'c': 'green', 'b': 'red', 'e': 'green'},
'e': {'d': 'green', 'g': 'green', 'f': 'green'},
'f': {'e': 'green', 'g': 'red'},
'g': {'e': 'green', 'f': 'red'}
}
nd = number_of_descendants(S, 'a')
assert nd == {'a':7, 'b':1, 'c':5, 'd':4, 'e':3, 'f':1, 'g':1}
###############
def lowest_post_order(S, root, po):
# return a mapping of the nodes in S
# to the lowest post order value
# below that node
# (and you're allowed to follow 1 red edge)
pass
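# One possible sketch for the stub above (illustrative): the smallest post-order
# value reachable from a node by descending green edges and following at most
# one red edge at the end.
def lowest_post_order_sketch(S, root, po):
    lpo = {}
    def dfs(node, parent):
        best = po[node]
        for nbr, color in S[node].items():
            if nbr == parent:
                continue
            if color == 'green':
                best = min(best, dfs(nbr, node))
            else:
                # a red edge may be used only once, so just its endpoint counts
                best = min(best, po[nbr])
        lpo[node] = best
        return best
    dfs(root, None)
    return lpo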
def test_lowest_post_order():
S = {'a': {'c': 'green', 'b': 'green'},
'b': {'a': 'green', 'd': 'red'},
'c': {'a': 'green', 'd': 'green'},
'd': {'c': 'green', 'b': 'red', 'e': 'green'},
'e': {'d': 'green', 'g': 'green', 'f': 'green'},
'f': {'e': 'green', 'g': 'red'},
'g': {'e': 'green', 'f': 'red'}
}
po = post_order(S, 'a')
l = lowest_post_order(S, 'a', po)
assert l == {'a':1, 'b':1, 'c':1, 'd':1, 'e':2, 'f':2, 'g':2}
################
def highest_post_order(S, root, po):
# return a mapping of the nodes in S
# to the highest post order value
# below that node
# (and you're allowed to follow 1 red edge)
pass
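# One possible sketch for the stub above (illustrative): the mirror image of
# lowest_post_order_sketch, taking the maximum instead of the minimum.
def highest_post_order_sketch(S, root, po):
    hpo = {}
    def dfs(node, parent):
        best = po[node]
        for nbr, color in S[node].items():
            if nbr == parent:
                continue
            if color == 'green':
                best = max(best, dfs(nbr, node))
            else:
                best = max(best, po[nbr])
        hpo[node] = best
        return best
    dfs(root, None)
    return hpo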
def test_highest_post_order():
S = {'a': {'c': 'green', 'b': 'green'},
'b': {'a': 'green', 'd': 'red'},
'c': {'a': 'green', 'd': 'green'},
'd': {'c': 'green', 'b': 'red', 'e': 'green'},
'e': {'d': 'green', 'g': 'green', 'f': 'green'},
'f': {'e': 'green', 'g': 'red'},
'g': {'e': 'green', 'f': 'red'}
}
po = post_order(S, 'a')
h = highest_post_order(S, 'a', po)
assert h == {'a':7, 'b':5, 'c':6, 'd':5, 'e':4, 'f':3, 'g':3}
#################
def bridge_edges(G, root):
# use the four functions above
# and then determine which edges in G are bridge edges
# return them as a list of tuples ie: [(n1, n2), (n4, n5)]
pass
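# One possible sketch for the stub above (illustrative), wired to the sketch
# helpers defined earlier: a green tree edge (parent, child) is a bridge when
# the child's subtree cannot reach outside itself, i.e. its highest reachable
# post-order stays at or below the child's own value and its lowest reachable
# post-order does not drop below the subtree's own range of post-order values.
def bridge_edges_sketch(G, root):
    S = create_rooted_spanning_tree(G, root)
    po = post_order_sketch(S, root)
    nd = number_of_descendants_sketch(S, root)
    lpo = lowest_post_order_sketch(S, root, po)
    hpo = highest_post_order_sketch(S, root, po)
    bridges = []
    stack = [(root, None)]
    while stack:
        node, parent = stack.pop()
        for child, color in S[node].items():
            if child != parent and color == 'green':
                if hpo[child] <= po[child] and lpo[child] > po[child] - nd[child]:
                    bridges.append((node, child))
                stack.append((child, node))
    return bridges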
def test_bridge_edges():
G = {'a': {'c': 1, 'b': 1},
'b': {'a': 1, 'd': 1},
'c': {'a': 1, 'd': 1},
'd': {'c': 1, 'b': 1, 'e': 1},
'e': {'d': 1, 'g': 1, 'f': 1},
'f': {'e': 1, 'g': 1},
'g': {'e': 1, 'f': 1}
}
bridges = bridge_edges(G, 'a')
assert bridges == [('d', 'e')]
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReplicationAlertSettingsOperations(object):
"""ReplicationAlertSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicessiterecovery.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AlertCollection"]
"""Gets the list of configured email notification(alert) configurations.
Gets the list of email notification(alert) configurations for the vault.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AlertCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.AlertCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AlertCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AlertCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationAlertSettings'} # type: ignore
def get(
self,
alert_setting_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Alert"
"""Gets an email notification(alert) configuration.
Gets the details of the specified email notification(alert) configuration.
:param alert_setting_name: The name of the email notification configuration.
:type alert_setting_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Alert, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Alert
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Alert"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'alertSettingName': self._serialize.url("alert_setting_name", alert_setting_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Alert', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationAlertSettings/{alertSettingName}'} # type: ignore
def create(
self,
alert_setting_name, # type: str
request, # type: "_models.ConfigureAlertRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.Alert"
"""Configures email notifications for this vault.
Create or update an email notification(alert) configuration.
:param alert_setting_name: The name of the email notification(alert) configuration.
:type alert_setting_name: str
:param request: The input to configure the email notification(alert).
:type request: ~azure.mgmt.recoveryservicessiterecovery.models.ConfigureAlertRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Alert, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Alert
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Alert"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'alertSettingName': self._serialize.url("alert_setting_name", alert_setting_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'ConfigureAlertRequest')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Alert', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationAlertSettings/{alertSettingName}'} # type: ignore
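# Illustrative usage sketch (an addition, not generated code): given an already
# constructed service client that exposes this operation group as an attribute
# (the attribute name `replication_alert_settings` is an assumption based on
# the AutoRest naming convention), iterate the configured alerts and fetch one
# by name.
def _example_list_and_get(client, alert_setting_name):
    ops = client.replication_alert_settings  # assumed ReplicationAlertSettingsOperations instance
    for alert in ops.list():  # pages through AlertCollection results
        print(alert.name)  # Alert is an ARM resource model; `name` is assumed present
    return ops.get(alert_setting_name)  # returns the deserialized Alert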
|
|
#!/usr/bin/env python
"""Neeraj's generic image aligner.
Usage: python %s
Some information is written to the saved image, in the UserComment, in our usual format.
This includes:
- all the parameters passed in via <parameters>,
- <METHOD>_TIMESTAMP@seconds_since_the_epoch, where <METHOD> is the capitalized method,
- when <method> is 'oldaffine', 'affine', 'similar', or 'simple', the matrix of the
transformation, with tag AFFINE.
Licensed under the 3-clause BSD License:
Copyright (c) 2012, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import math, os, sys, time
from nkpylib.utils import *
from nkpylib.exif import getDict, setDict
from PIL import Image, ImageDraw
import numpy as np
assert sys.platform.startswith('linux') # NO WINDOWS OR MAC!
try:
import simplejson as json
except ImportError:
import json
# CONFIG PARAMS
ALIGN_CONFIG_FNAME = 'nkaligners.json'
DEFAULT_OUTPUT_FMT = '%(inpath)s\t%(outpath)s'
DEFAULT_ERROR_FMT = 'error'
JPEG_QUALITY = 95
INTERPOLATION = Image.BICUBIC
DEFAULT_DTYPE = np.double
# SMALL UTILITY FUNCTIONS
def md5sum(s):
"""MD5s the given string of data and returns the hexdigest.
If you want the md5 of a file, call md5sum(open(fname).read())"""
import hashlib
return hashlib.md5(s).hexdigest().lower()
def getFidsFromDicts(indict, outdict, outsize=(1,1), dtype=DEFAULT_DTYPE):
"""Returns Nx3 matrices for input and output fiducial points.
If indict and outdict are dicts, then uses keys that exist in both indict
and outdict, in sorted order.
Else, assumes they're sequences of fiducials, and checks that they're same length.
    The outputs are in homogeneous coordinates, i.e., with the last coordinate = 1.
The output points are scaled by the width and height in outsize.
If outsize is None, then outsize is assumed to be (1,1)
You can optionally specify what datatype to use for the matrices.
Returns (inpts, outpts), each as a np.array()
"""
if isinstance(indict, dict) and isinstance(outdict, dict):
keys = sorted(set(indict) & set(outdict))
else:
assert len(indict) == len(outdict)
keys = range(len(indict))
n = len(keys)
inpts = np.ones((n, 3), dtype=dtype)
outpts = np.ones((n, 3), dtype=dtype)
for i, k in enumerate(keys):
inpts[i, :2] = indict[k][:2]
outpts[i, :2] = outdict[k][:2]
if outsize is not None:
outpts[:,0] *= outsize[0]
outpts[:,1] *= outsize[1]
return (inpts, outpts)
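# Small illustrative example of getFidsFromDicts (hypothetical values): with
# dict inputs only the shared keys are used, in sorted order, and the output
# points are scaled by outsize. For
#     getFidsFromDicts({'a': (1, 2), 'b': (3, 4)},
#                      {'a': (0.5, 0.5), 'b': (1.0, 1.0)}, outsize=(100, 200))
# the homogeneous results are inpts = [[1, 2, 1], [3, 4, 1]] and
# outpts = [[50, 100, 1], [100, 200, 1]].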
def transformPts(trans, pts, dtype=DEFAULT_DTYPE):
"""Transforms the given points using the given transform.
    The pts should be a list of pairs or triples (homogeneous).
Returns a numpy Nx2 array of results"""
p = np.ones((len(pts), 3), dtype=dtype)
for i, pt in enumerate(pts):
if len(pt) == 2:
p[i, :2] = pt[:2]
elif len(pt) == 3:
p[i, :3] = pt[:3]
else:
raise ValueError('Pt length must be 2 or 3')
ret = np.dot(p, trans)
last = ret[:, 2]
ret /= last.reshape((len(pts), 1))
return ret[:, :2]
# TRANSFORMATIONS
def getSimpleTransform(indict, outdict, outsize=None):
"""Mapping from input points to output points, as only a translation + uniform scaling"""
#print "Got indict %s, outdict %s, outsize %s" % (indict, outdict, outsize)
def eyecen(d):
cens = [d[k] for k in ('LEFT_EYE_OUT', 'LEFT_EYE_IN', 'RIGHT_EYE_IN', 'RIGHT_EYE_OUT')]
cenx, ceny = zip(*cens)
cenx, ceny = sum(cenx)/4.0, sum(ceny)/4.0
w = cens[-1][0] - cens[0][0]
#print cens, cenx, ceny, w
return (cenx, ceny), w
eye_in, w_in = eyecen(indict)
eye_out, w_out = eyecen(outdict)
eye_out = [e*o for e, o in zip(eye_out, outsize)]
#print eye_in, eye_out, w_in, w_out
if w_in == 0:
w_in = w_out
s = w_out/w_in
t = [(o-i*s) for i, o in zip(eye_in, eye_out)]
#print s, t
ret = np.array([[s, 0, 0], [0, s, 0], [t[0], t[1], 1.0]])
return ret
def getSimilarityTransform(indict, outdict, outsize=None):
"""Returns a similarity transformation to go from input pts to output pts.
'indict' and 'outdict' should contain identical keys mapping to 2-tuples.
A similarity transformation matrix has the form
            [k cos  -k sin  dx]             [a  -b  c]
        T = [k sin   k cos  dy]   or just   [b   a  d]
            [  0       0     1]             [0   0  1].
    Then, for each (xin, yin), (xout, yout) correspondence, we have
          [xin]   [xout]
        T [yin] = [yout]
          [ 1 ]   [ 1 ]
    which is equivalent to
                           [a]
        [xin  -yin  1  0]  [b]   [xout]
        [yin   xin  0  1]  [c] = [yout].
                           [d]
We stack up the leftmost and rightmost bits to have 2 * number_of_correspondences rows,
then find the least squares solution for (a, b, c, d), then build T. Transformations
in this code are given as the transpose of this form (so that xt A = xt' rather than A x = x'),
so we return (T.transpose(), Ax), where the 2nd term is the transformed locations of the inputs."""
# indict = {'x1': (1,1), 'x2': (3,1), 'x3': (4,1), 'x4': (6,1), 'x5': (2,5), 'x6': (5,5)}
# outdict = {'x1': (3,2), 'x2': (5,2), 'x3': (6,2), 'x4': (8,2), 'x5': (4,6), 'x6': (7,6)}
A = np.zeros((len(outdict)*2, 4), dtype=np.double)
B = np.zeros((len(outdict)*2, 1), dtype=np.double)
inpts, outpts = getFidsFromDicts(indict, outdict, outsize)
for i, (pin, pout) in enumerate(zip(inpts, outpts)):
A[(2*i),:] = [pin[0], -pin[1], 1, 0]
B[(2*i), 0] = pout[0]
A[(2*i+1),:] = [pin[1], pin[0], 0, 1]
B[(2*i+1), 0] = pout[1]
# multiply by A transpose on both sides
At = A.transpose()
left = np.dot(At, A)
right = np.dot(At, B)
# linear least squares solve for the transform, x
x, resids, rank, s = np.linalg.lstsq(left, right)
# Transformation matrix is [[a, -b, c], [b, a, d], [0, 0, 1]].
a = x[0,0]
b = x[1,0]
c = x[2,0]
d = x[3,0]
T = np.zeros((3, 3), dtype=np.double)
    T[:, :] = [[a, -b, c], [b, a, d], [0, 0, 1]]
# the other functions expect the transpose of this matrix
ret = T.transpose()
Ax = np.dot(inpts, ret)
return ret, Ax
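# A quick self-contained check of getSimilarityTransform (an illustrative
# addition, not part of the original module), using the example correspondences
# from the commented-out dicts in the docstring above: every output point is the
# matching input translated by (2, 1), so the recovered similarity should be a
# pure translation and Ax should reproduce the outputs.
def _check_similarity_example():
    indict = {'x1': (1, 1), 'x2': (3, 1), 'x3': (4, 1), 'x4': (6, 1), 'x5': (2, 5), 'x6': (5, 5)}
    outdict = {'x1': (3, 2), 'x2': (5, 2), 'x3': (6, 2), 'x4': (8, 2), 'x5': (4, 6), 'x6': (7, 6)}
    T, Ax = getSimilarityTransform(indict, outdict)
    return T, Ax  # T is in the transposed convention used throughout; Ax maps the inputs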
def getAffineTransform(indict, outdict, outsize=None):
"""Returns a transformation to go from input pts to output pts.
'indict' and 'outdict' should contain identical keys mapping to 2-tuples.
Each point is homogenized, then a linear least squares solution to A'Ax=A'B is found,
where A = inputs, B = outputs, x = affine transformation.
Returns (x, Ax), where the 2nd term is the transformed locations of the inputs."""
A, B = getFidsFromDicts(indict, outdict, outsize=outsize)
# multiply by A transpose on both sides
At = A.transpose()
left = np.dot(At, A)
right = np.dot(At, B)
# linear least squares solve for the transform, x
x, resids, rank, s = np.linalg.lstsq(left, right)
Ax = np.dot(A, x)
return x, Ax
def getHomography(indict, outdict, outsize=None):
"""Returns a transformation to go from input pts to output pts using a homography.
'indict' and 'outdict' should contain identical keys mapping to 2-tuples.
We create A:
x1 y1 1 0 0 0 -x1*x1' -y1*x1'
0 0 0 x1 y1 1 -x1*y1' -y1*y1'
x2 y2 1 0 0 0 -x2*x2' -y2*x2'
0 0 0 x2 y2 1 -x2*y2' -y2*y2'
...
And b:
[x1' y1' x2' y2' x3' y3' ...].T
Then solve for h in Ah = b using linear least squares, where h is:
[h11 h12 h13 h21 h22 h23 h31 h32].T
and h33 is 1.
Returns (h, Ah), where the 2nd term is the transformed locations of the inputs.
"""
# initialize both matrices
A = np.zeros((2*len(outdict), 8), dtype=np.double)
b = np.zeros((2*len(outdict), 1), dtype=np.double)
inputs, outputs = getFidsFromDicts(indict, outdict, outsize=outsize)
# copy over data
for i, ((xi, yi, _), (xo, yo, _)) in enumerate(zip(inputs, outputs)):
A[2*i,:] = [xi, yi, 1, 0, 0, 0, -xi*xo, -yi*xo]
A[2*i+1, :] = [0, 0, 0, xi, yi, 1, -xi*yo, -yi*yo]
b[2*i] = xo
b[2*i+1] = yo
#print A, A.shape, b, b.shape, inputs, inputs.shape
# Linear least squares solve
h, resids, rank, s = np.linalg.lstsq(A, b)
h = h.flatten()
ret = np.ones((3,3), dtype=np.double)
ret[:, :] = [h[:3], h[3:6], [h[6], h[7], 1.0]]
ret = ret.transpose()
# we need transposed version of h throughout
ah = np.dot(inputs, ret)
ah /= ah[:, -1:]
if 0:
print h, len(h)
print 'ret\n', ret, ret.shape
print 'normed ah\n', ah, ah.shape
print 'outputs\n', outputs
print 'inputs\n', inputs
print 'diff %\n', 100.0*(outputs-ah)/outputs
return ret, ah
def applyTransform(im, transform, outsize):
"""Apply the given transform (3 x 3 Numpy array) to the given image, cropping the image to the
given size (a (width, height) 2-tuple)."""
# compute the inverse transform and transpose it
inv = np.linalg.inv(transform)
if 0: # Old code for affine transformations only
affine = inv.transpose()[:2, :]
out = im.transform(outsize, Image.AFFINE, affine.flatten(), INTERPOLATION)
else: # full perspective transformations (note that PERSPECTIVE exists in PIL!)
homography = inv.transpose().flatten()[:-1]
out = im.transform(outsize, Image.PERSPECTIVE, homography, INTERPOLATION)
return out
TRANSFORM_TYPES = dict(simple=getSimpleTransform, similarity=getSimilarityTransform, affine=getAffineTransform, homography=getHomography)
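# End-to-end usage sketch (illustrative; the fiducial names, coordinates and
# output size below are made-up assumptions): estimate a transform from matching
# fiducials via one of the TRANSFORM_TYPES entries, then warp the image into a
# fixed-size output with applyTransform().
def _example_align(im):
    infids = {'LEFT_EYE_OUT': (120, 140), 'LEFT_EYE_IN': (160, 142),
              'RIGHT_EYE_IN': (200, 141), 'RIGHT_EYE_OUT': (240, 139)}
    outfids = {'LEFT_EYE_OUT': (0.30, 0.40), 'LEFT_EYE_IN': (0.42, 0.40),
               'RIGHT_EYE_IN': (0.58, 0.40), 'RIGHT_EYE_OUT': (0.70, 0.40)}
    outsize = (256, 256)
    transform, _ = TRANSFORM_TYPES['similarity'](infids, outfids, outsize)
    return applyTransform(im, transform, outsize)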
# OUTPUT FUNCTIONS
def drawfids(im, fids, params):
"""Draws fiducials on the output image, depending on 'params'"""
from PIL import ImageDraw
# figure out what kind of fids we want to draw
drawtype = params.get('drawfids', 'none')
if drawtype == 'none': return im
assert drawtype in 'circ circle centrect centrectangle rect rectangle'.split()
# get other params
fill = params.get('drawfidsfill', 'green')
line = params.get('drawfidsline', 'green')
r = int(params.get('drawfidsr', 3))
# draw the points
draw = ImageDraw.Draw(im)
for f in fids:
x, y = int(round(f[0])), int(round(f[1]))
if drawtype in 'circ circle'.split():
draw.ellipse((x-r, y-r, x+r, y+r), outline=line, fill=fill)
elif drawtype in 'centrect centrectangle'.split():
draw.rectangle((x-r, y-r, x+r, y+r), outline=line, fill=fill)
elif drawtype in 'rect rectangle'.split():
x2, y2 = int(round(f[2])), int(round(f[3]))
draw.rectangle((x, y, x2, y2), outline=line, fill=fill)
return im
def printmat(m):
"""Returns a string for a given matrix."""
s = ''
for row in m:
s += '[%s]' % ' '.join('%f' % v for v in row)
return s
def saveImage(im, path, tags=None):
"""Save the given image to the given path, optionally saving 'tags' in the
EXIF UserComment in our usual way.
If tags is given, then the format is JPEG regardless of the filename, since
only JPEGs have EXIF. Else, it's determined by the filename.
"""
if not os.path.basename(path): raise Exception('Invalid save path %s.' % path)
try:
os.makedirs(os.path.dirname(path))
except OSError: pass
try:
kw = {}
if JPEG_QUALITY:
kw['quality'] = JPEG_QUALITY
if tags:
kw['format'] = 'JPEG'
im.save(path, **kw)
except IOError, ex:
raise Exception("Can't save to path %s. %s" % (path, ex.strerror))
if tags:
setDict(path, tags)
# MAIN ALIGNER CLASS
class Aligner(object):
"""A class to align images in different ways.
Given an 'aligner name', this generates an alignment function.
This function takes in an input image (and optionally some parameters),
and returns an aligned image.
The typical usage is like this:
aligner = Aligner('left arm')
for im, inp, outp in zip(images, fiducials, outparams):
aligned, params = aligner.align(im, fiducials=inp, outparams=outp)
The aligner names are defined in a config file in JSON format, with all the
data under the 'aligners' field. If there is no 'aligners' field, then it
tries to use the whole json structure. Here is an example:
{"aligners": {
"left-eye": {"type": "similarity",
"fids": {"LEFT_EYE_OUT": [101.84555, 120.42587], "LEFT_EYE_IN": [49,401]},
"offset": [5, 10]
},
"right-eye": {"type": "similarity",
"fids": {"RIGHT_EYE_OUT": [101.84555, 120.42587], "RIGHT_EYE_IN": [49,401]},
"offset": ["%(CROP_TOP_LEFT_X)s+5", "%(CROP_TOP_LEFT_Y)s+10"]
        },
"torso": {"type": "affine",
"fids": {"LEFT_SHOULDER": [101.84555, 120.42587], "RIGHT_SHOULDER": [49,401], "LEFT_HIP": [59,1], "RIGHT_HIP": [549, 140]}
}
        }
    }
Each aligner is defined using a name, and contains fields:
'fids': A dictionary of fiducial point mappings. It maps fiducial names
to normalized output [x, y] coordinates, in the range 0-1.
'offset': [optional] An [x, y] offset to apply to all input fiducials.
If these are numbers, they are used as-is.
If they are strings, then they are eval'ed after substitution:
val = eval(s % (fidDict))
where fidDict is the dictionary of fiducial point locations.
This lets you define things such as:
"%(LEFT_EYE_IN)s+5"
'type': [optional] The underlying alignment type. One of:
None, similarity, quad, affine.
If not given, it's inferred from the length of 'fids':
< 2: None
2: similarity
4: quad
else: affine
The alignment function optionally needs some input and output parameters.
The input params are a mapping from fiducial names (corresponding to those
in the config file) to their locations in the current image. They can
also include a cropping rect with key 'crop', which is either a string of
'x0,y0,x1,y1' rect from which to extract features from, or a list of
numbers directly. This is first cut from the image and provides the extents
relative to which the feature locations are assumed to be located.
The output params include:
width: the width to scale the cropped input to (aspect ratio NOT preserved)
[default: original resolution]
height: the height to scale the cropped input to (aspect ratio NOT preserved)
[default: original resolution]
The alignment is done by creating an output image of the right size and
then mapping the input image into that output based upon the align type:
None: The image is only cropped (if specified).
similarity: The fiducials are used to determine a rotation, scaling, and translation.
quad: The 4 fiducials are mapped as a quadrilateral onto the output.
affine: The fiducials are used to compute a linear least squares affine alignment.
"""
def __init__(self, name, alignconfig=ALIGN_CONFIG_FNAME):
"""Creates an aligner of the given name.
The list of aligner names are defined in the given config file.
"""
# read the config
try:
aligndict = json.load(open(alignconfig), strict=False)
except Exception:
aligndict = {}
# set our align name and params
if not name:
name = ''
d = aligndict.get(name, {})
self.name = name
self.outfids = d.get('fids', ())
self.offsets = d.get('offset', ())
if 'type' not in d:
fidlen = len(self.outfids)
if fidlen < 2:
d['type'] = ''
elif fidlen == 2:
d['type'] = 'similarity'
elif fidlen == 4:
d['type'] = 'quad'
else:
d['type'] = 'affine'
# normalize none
if d['type'].lower() == 'none':
d['type'] = ''
self.aligntype = d['type']
def align(self, im, fiducials=None, outparams=None, **kw):
"""Aligns the given image and returns (aligned image, modified outparams)."""
from nkpylib.imageutils import croprect
# normalize inputs
if not fiducials: fiducials = {}
if not outparams: outparams = {}
# Check the md5 checksum, if it exists
try:
oldmd5 = outparams['ACTUAL_IMG_MD5'].lower()
newmd5 = md5sum(open(im.filename).read())
if oldmd5 != newmd5:
raise Exception('Input file %s has checksum %s but should be %s' % (im.filename, newmd5, oldmd5))
except (KeyError, AttributeError):
pass
# shift fiducials if needed
if self.offsets:
# convert offsets to numbers if they are eval'able format strings
dx, dy = off = [eval(v % fiducials) if isinstance(v, basestring) else v for v in self.offsets]
# now apply these offsets
for name, val in fiducials.items():
try:
x, y = val
fiducials[name] = (x+dx, y+dy)
except TypeError: pass
# set output size
outint = lambda varname, defval: int(outparams.get(varname, defval))
outsize = (outint('width', im.size[0]), outint('height', im.size[1]))
outparams['width'], outparams['height'] = outsize
# crop image if wanted
if 'crop' in fiducials:
rect = fiducials['crop']
if isinstance(rect, basestring):
rect = rect.split(',')
rect = tuple(map(float, rect))
im = croprect(im, rect, bg=(0,0,0))
# do the actual alignment
try:
func = TRANSFORM_TYPES[self.aligntype]
transform, outfids = func(fiducials, self.outfids, outsize)
out = applyTransform(im, transform, outsize)
out = drawfids(out, outfids, outparams)
# add some keys to outparams
outparams['AFFINE'] = printmat(transform)
outparams['%s_TIMESTAMP' % self.aligntype.upper()] = str(time.time())
except KeyError, e:
#raise e
# unknown or no transformation -- do no transformation
# but apply drawfids if necessary
fids = [f for f in fiducials.values() if isinstance(f, (tuple, list)) and len(f) == 2]
out = drawfids(im, fids, outparams)
# resize if our output is not the right size already
if out.size != outsize:
out = out.resize(outsize, INTERPOLATION)
# return image and modified outparams
return (out, outparams)
def parseFiducials(s):
"""Parses a fiducials dictionary from the given string"""
from nkpylib.utils import specializeDict, str2kvdict
# make the type-specialized dict
fids = specializeDict(str2kvdict(s, sep='@', dlm='::'))
# convert _x and _y fids to pairs
names = set(f.rsplit('_', 1)[0] for f in fids if f.lower().endswith('_x') or f.lower().endswith('_y'))
def popic(name, c):
"""Pops the fids with given name and ending char, ignoring case"""
try:
#return fids.pop(name+'_'+c.lower())
return fids[name+'_'+c.lower()]
except KeyError:
#return fids.pop(name+'_'+c.upper())
return fids[name+'_'+c.upper()]
for n in names:
fids[n] = (popic(n, 'x'), popic(n, 'y'))
return fids
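# Illustrative use of parseFiducials (hypothetical values), assuming nkpylib's
# str2kvdict/specializeDict parse '@'-separated pairs joined by '::' and convert
# numeric strings to numbers:
#     parseFiducials('LEFT_EYE_X@10.5::LEFT_EYE_Y@20.0::crop@0,0,100,100')
# returns a dict containing LEFT_EYE_X, LEFT_EYE_Y and crop, plus the combined
# pair fids['LEFT_EYE'] == (10.5, 20.0).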
def simplemain():
"""A simple main"""
from nkpylib.utils import specializeDict, str2kvdict
if len(sys.argv) < 6:
print 'Usage: python %s <aligner name> <input image name> <output image name> <fiducials> <outparams>' % (sys.argv[0])
sys.exit()
name, infname, outfname, fiducials, outparams = sys.argv[1:6]
a = Aligner(name=name)
im = Image.open(infname)
fiducials = parseFiducials(fiducials)
outparams = str2kvdict(outparams, sep='@', dlm='::')
print 'INFILE:', infname
print 'FIDUCIALS:', fiducials
print 'OUTPARAMS:', outparams
aligned, params = a.align(im, fiducials=fiducials, outparams=outparams)
print 'PARAMS:', params
saveImage(aligned, outfname, params)
print 'OUTFILE:', outfname
sys.exit()
def processLine(line):
"""Process a single line of input, returning a single line of output as a string.
Input on stdin is
<input path>\t<output fmt>\t<aligner>\t<fiducials>\t<output parameters>
where:
- <input path> is a local path of the input image to align (not a url),
- <output fmt> is a format string which will generate the output path. It's given a dict with:
inpath: the input file path
basename: the input file basename
aligner: the passed-in aligner name
fiducials: the passed-in fiducials string
outparams: the passed-in output parameters string
(plus any output parameters returned from the alignment itself)
- <aligner> is the name of the aligner to use,
- <fiducials> is a list of 'key@value' pairs, joined using ::
These are used for determining feature locations, which the aligners are defined relative to.
Any extra fiducials (not needed by the given aligner) are ignored.
If there is a missing fiducial, an error is returned.
- <output parameters> is an optional list of 'key@value' pairs, joined using '::'
These are used for defining parameters about the output. Currently, we support:
crop: 'x0,y0,x1,y1' rect from which to extract features. This is
first cut from the image and provides the extents relative to which
the feature locations are assumed to be located.
[default: no crop]
width: the width to scale the cropped input to (aspect ratio NOT preserved)
[default: original resolution]
height: the height to scale the cropped input to (aspect ratio NOT preserved)
[default: original resolution]
drawfids: how to draw fiducials on output. options:
none: don't draw fiducials [default]
circle: draw a circle
rectangle: draw a rectangle
drawfidsline: the color to draw fiducial outlines in, as any valid color string (only if drawfids is on)
[default: green]
drawfidsfill: the color to fill drawn fiducials in, as any valid color string (only if drawfids is on)
[default: green]
drawfidsr: the radius of the circle to draw fiducials in
[default: 3]
outfmt: the output format to print on stdout. This is a standard python format string,
to which we'll pass a dictionary with the following fields:
basename: input file basename
inpath: input file path
outpath: output file path
outfmt: the passed-in output file format string
aligner: the passed-in aligner string
fiducials: the passed-in input parameters string
outparams: the passed-in output parameters string
[default: '%(inpath)s\t%(outpath)s']
errfmt: what to print in case of error, again as a python format string.
The fmtdict is like in 'fmt', and also containing:
errortype: a python exception type name
errormsg: the error message
[default: 'error']
A full input string might look like:
FIXME
"""
#TODO test out various outfmt options
#TODO how to specify if we want to write EXIF or not?
from collections import defaultdict
fmtdict = defaultdict(str)
DEFAULT_OUTPARAMS = defaultdict(str)
DEFAULT_OUTPARAMS['outfmt'] = DEFAULT_OUTPUT_FMT
DEFAULT_OUTPARAMS['errfmt'] = DEFAULT_ERROR_FMT
DEFAULT_OUTPARAMS['drawfids'] = 'none'
DEFAULT_OUTPARAMS['drawfidsline'] = 'green'
DEFAULT_OUTPARAMS['drawfidsfill'] = 'green'
DEFAULT_OUTPARAMS['drawfidsr'] = 3
# parse elements
els = line.split('\t')
try:
# input and output
fmtdict['inpath'] = inpath = els.pop(0)
fmtdict['basename'] = basename = os.path.basename(inpath)
fmtdict['outpathfmt'] = outpathfmt = els.pop(0)
#print path, basename, fmtdict, outfmt
# aligner
fmtdict['aligner'] = aligner = els.pop(0)
#print aligner
# fiducials
fmtdict['fiducials'] = fiducials = els.pop(0)
fiducials = parseFiducials(fiducials)
# output params
outparams = dict(**DEFAULT_OUTPARAMS)
#print outparams
if els:
# output params are optional, so we don't want to raise an exception here
fmtdict['outparams'] = els.pop(0)
#print fmtdict['outparams']
outparams.update(str2kvdict(fmtdict['outparams'], sep='@', dlm='::'))
#print outparams
# at this stage, we have everything we need
# first make sure the file exists and open it
if not os.path.exists(inpath): raise IOError('Image does not exist')
im = Image.open(inpath)
# process the image
a = Aligner(name=aligner)
aligned, params = a.align(im, fiducials=fiducials, outparams=outparams)
fmtdict.update(params)
outparams['outfmt'] = outparams['outfmt'].replace(r'\t', '\t').replace(r'\n', '\n')
# save the output image
fmtdict['outpath'] = outpath = outpathfmt % fmtdict
#print outpathfmt, inpath, basename, fmtdict, outpath
fmtdict['outpathfmt'] = fmtdict['outpathfmt'].replace(r'\t', '\t').replace(r'\n', '\n')
saveImage(aligned, outpath, params)
# generate the output string
ret = outparams['outfmt'] % (fmtdict)
return ret
except Exception, e:
#raise
# add the error values to the fmtdict
fmtdict['errortype'] = type(e).__name__
try:
fmtdict['errormsg'] = str(e)
except Exception:
pass
# generate and return the error string
errstr = outparams['errfmt'] % fmtdict
return errstr
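# Illustrative stdin line for processLine() (hypothetical paths and values; <TAB> marks a tab):
#     /data/in/img001.jpg<TAB>/data/out/%(basename)s<TAB>left-eye<TAB>LEFT_EYE_OUT_X@120.5::LEFT_EYE_OUT_Y@88::LEFT_EYE_IN_X@98::LEFT_EYE_IN_Y@90<TAB>width@256::height@256::drawfids@circle
# With these values the output path becomes '/data/out/img001.jpg' and the line printed
# on stdout follows the 'outfmt' output parameter (default '%(inpath)s\t%(outpath)s').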
def mainloop():
"""An infinite main loop for running alignment"""
global ALIGN_CONFIG_FNAME
if len(sys.argv) < 1:
print "Usage: python %s [<aligners file>=%s]" % (sys.argv[0], ALIGN_CONFIG_FNAME)
sys.exit()
try:
ALIGN_CONFIG_FNAME = sys.argv[1]
except IndexError:
pass
assert os.path.exists(ALIGN_CONFIG_FNAME), 'The given align config file %s does not exist!' % (ALIGN_CONFIG_FNAME)
def do(line):
try:
print processLine(line)
sys.stdout.flush()
except IOError:
pass
stdmainloop(do)
def parseInputs(fname):
"""Parses the input in the given file to get a dict of fiducials."""
from imageutils import imageiter
fids = {}
try:
# if it's an image, read colored points as fiducials
im = Image.open(fname)
assert im.mode == 'RGB'
# get all colored pixels
for loc, col in imageiter(im):
r, g, b = col
if r==g==b: continue # grayscale points don't count
if col not in fids:
fids[col] = []
fids[col].append(loc)
# average each color's location to get precise estimate
for col in fids:
xs, ys = zip(*fids[col])
l = float(len(xs))
fids[col] = (sum(xs)/l, sum(ys)/l)
except Exception:
# it's a text file, so get the format
for i, l in enumerate(open(fname)):
l = l.rstrip('\n')
if '\t' in l: # could possibly have string keynames
els = l.split('\t')
if len(els) == 2: # only x, y, so use i as the keyname
fids[i] = map(float, els)
elif len(els) > 2: # assume 1st field is keyname and next 2 are x,y
fids[els[0]] = map(float, els[1:3])
else: # must be separated by spaces, so no strings
fids[i] = map(float, l.split()[:2]) # x,y are first 2 fields
log('Read %d fids from %s, with first items: %s' % (len(fids), fname, sorted(fids.items())[:3]))
return fids
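# Illustrative text-file input for parseInputs (hypothetical values); each line is either
# "name<TAB>x<TAB>y", "x<TAB>y", or space-separated "x y" (lines without a name are keyed
# by their line number):
#     LEFT_EYE<TAB>120.5<TAB>88.0
#     RIGHT_EYE<TAB>180.0<TAB>90.0
# Alternatively an RGB image can be given, in which case each non-gray color marks one
# fiducial and its pixel locations are averaged to a single (x, y) estimate.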
TEST_USAGE = '''Usages:
<%(transtypes)s> <input fids> <output fids> [<transformed fids fname>] => homography
<%(transtypes)s> <img1 w/colored fids> <img2 w/colored fids> [<transformed img1 fname>] => homography
''' % (dict(transtypes='|'.join(TRANSFORM_TYPES)))
def testhomography(args):
"""Tests out homography estimation"""
import select
np.set_printoptions(precision=7, linewidth=150, suppress=1)
if len(args) < 2:
print TEST_USAGE
sys.exit()
# read inputs and parse
transtype = args.pop(0)
transfunc = TRANSFORM_TYPES[transtype]
in1 = args.pop(0)
in2 = args.pop(0)
fids1, fids2 = parseInputs(in1), parseInputs(in2)
h, ah = transfunc(fids1, fids2)
print h
# any remaining arguments are for outputs
if args:
outname = args.pop(0)
if outname.endswith('.jpg') or outname.endswith('.png'):
# save output image
outim = Image.open(in1)
outim = applyTransform(outim, h, Image.open(in2).size)
outim.save(outname)
else:
# save projected points text file
pass #TODO
if select.select([sys.stdin], [], [], 0.0)[0]:
#TODO also translate from stdin pts to stdout
# we have stdin data
pass
def debug():
a = [[220, 298],
[427, 313],
[297, 374],
[244, 457],
[379, 469],
[176, 257],
[278, 244],
[191, 282],
[230, 276],
[214, 324],
[256, 315],
[383, 252],
[489, 266],
[392, 318],
[424, 288],
[432, 338],
[474, 307],
[288, 319],
[337, 323],
[277, 354],
[323, 357],
[266, 386],
[347, 396],
[298, 409],
[298, 425],
[298, 443],
[300, 457]]
b = [[198.24593, 218.48312],
[301.75409, 218.48312],
[250, 288.72064],
[196.73442, 335.1088],
[303.26559, 335.1088],
[152.62563, 206.89514],
[220.81578, 196.98947],
[180.65184, 221.26352],
[196.88483, 213.14243],
[197.29204, 224.76518],
[212.424, 220.57774],
[279.18423, 196.98947],
[347.37439, 206.89514],
[287.57602, 220.57774],
[303.11517, 213.14243],
[302.70798, 224.76518],
[319.34818, 221.26352],
[234.78632, 222.358],
[265.21368, 222.358],
[227.47029, 264.40878],
[272.52972, 264.40878],
[216.52325, 288.04016],
[283.47675, 288.04016],
[250, 329.29788],
[250, 337.32162],
[250, 347.60889],
[250, 361.46271]]
print len(a), len(b)
np.set_printoptions(precision=7, linewidth=150, suppress=1)
x, ax = getAffineTransform(b, a)
print x
print ax
out = applyTransform(Image.open('cat.png'), x, (225,250))
out.save('cat-out.png')
if __name__ == "__main__":
#simplemain()
#mainloop()
#testhomography(sys.argv[1:])
debug()
|
|
'''
Unit tests for yedit
'''
import os
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place yedit in our path
yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, yedit_path)
from yedit import Yedit, YeditException # noqa: E402
# pylint: disable=too-many-public-methods
# Silly pylint, moar tests!
class YeditTest(unittest.TestCase):
'''
Test class for yedit
'''
data = {'a': 'a',
'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
} # noqa: E124
filename = 'yedit_test.yml'
def setUp(self):
''' setup method will create a file and set to known configuration '''
yed = Yedit(YeditTest.filename)
yed.yaml_dict = YeditTest.data
yed.write()
def test_load(self):
''' Testing a get '''
yed = Yedit('yedit_test.yml')
self.assertEqual(yed.yaml_dict, self.data)
def test_write(self):
''' Testing a simple write '''
yed = Yedit('yedit_test.yml')
yed.put('key1', 1)
yed.write()
self.assertTrue('key1' in yed.yaml_dict)
self.assertEqual(yed.yaml_dict['key1'], 1)
def test_write_x_y_z(self):
'''Testing a write of multilayer key'''
yed = Yedit('yedit_test.yml')
yed.put('x.y.z', 'modified')
yed.write()
yed.load()
self.assertEqual(yed.get('x.y.z'), 'modified')
def test_delete_a(self):
'''Testing a simple delete '''
yed = Yedit('yedit_test.yml')
yed.delete('a')
yed.write()
yed.load()
self.assertTrue('a' not in yed.yaml_dict)
def test_delete_b_c(self):
'''Testing delete of layered key '''
yed = Yedit('yedit_test.yml', separator=':')
yed.delete('b:c')
yed.write()
yed.load()
self.assertTrue('b' in yed.yaml_dict)
self.assertFalse('c' in yed.yaml_dict['b'])
def test_create(self):
'''Testing a create '''
os.unlink(YeditTest.filename)
yed = Yedit('yedit_test.yml')
yed.create('foo', 'bar')
yed.write()
yed.load()
self.assertTrue('foo' in yed.yaml_dict)
self.assertTrue(yed.yaml_dict['foo'] == 'bar')
def test_create_content(self):
'''Testing a create with content '''
content = {"foo": "bar"}
yed = Yedit("yedit_test.yml", content)
yed.write()
yed.load()
self.assertTrue('foo' in yed.yaml_dict)
self.assertEqual(yed.yaml_dict['foo'], 'bar')
def test_array_insert(self):
'''Testing an array insert'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', 'inject')
self.assertTrue(yed.get('b:c:d[0]') == 'inject')
def test_array_insert_first_index(self):
'''Testing the element at index 1 after an array insert'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', 'inject')
self.assertTrue(yed.get('b:c:d[1]') == 'f')
def test_array_insert_second_index(self):
'''Testing the element at index 2 after an array insert'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', 'inject')
self.assertTrue(yed.get('b:c:d[2]') == 'g')
def test_dict_array_dict_access(self):
'''Testing access of a dict nested in an array nested in a dict'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')
def test_dict_array_dict_replace(self):
'''Testing multilevel replace'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
yed.put('b:c:d[0]:[0]:x:y', 'testing')
self.assertTrue('b' in yed.yaml_dict)
self.assertTrue('c' in yed.yaml_dict['b'])
self.assertTrue('d' in yed.yaml_dict['b']['c'])
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing') # noqa: E501
def test_dict_array_dict_remove(self):
'''Testing multilevel delete'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
yed.delete('b:c:d[0]:[0]:x:y')
self.assertTrue('b' in yed.yaml_dict)
self.assertTrue('c' in yed.yaml_dict['b'])
self.assertTrue('d' in yed.yaml_dict['b']['c'])
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
def test_key_exists_in_dict(self):
'''Testing exist in dict'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
self.assertTrue(yed.exists('b:c', 'd'))
def test_key_exists_in_list(self):
'''Testing exist in list'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))
def test_update_to_list_with_index(self):
'''Testing update to list with index'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('x:y:z', [1, 2, 3])
yed.update('x:y:z', [5, 6], index=2)
self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
self.assertTrue(yed.exists('x:y:z', [5, 6]))
self.assertFalse(yed.exists('x:y:z', 4))
def test_update_to_list_with_curr_value(self):
'''Testing update to list with curr_value'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('x:y:z', [1, 2, 3])
yed.update('x:y:z', [5, 6], curr_value=3)
self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
self.assertTrue(yed.exists('x:y:z', [5, 6]))
self.assertFalse(yed.exists('x:y:z', 4))
def test_update_to_list(self):
'''Testing update to list'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('x:y:z', [1, 2, 3])
yed.update('x:y:z', [5, 6])
self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
self.assertTrue(yed.exists('x:y:z', [5, 6]))
self.assertFalse(yed.exists('x:y:z', 4))
def test_append_twice_to_list(self):
'''Testing append to list'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('x:y:z', [1, 2, 3])
yed.append('x:y:z', [5, 6])
yed.append('x:y:z', [5, 6])
self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
self.assertFalse(yed.exists('x:y:z', 4))
def test_add_item_to_dict(self):
'''Testing update to dict'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('x:y:z', {'a': 1, 'b': 2})
yed.update('x:y:z', {'c': 3, 'd': 4})
self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
self.assertTrue(yed.exists('x:y:z', {'c': 3}))
def test_first_level_dict_with_none_value(self):
'''test dict value with none value'''
yed = Yedit(content={'a': None}, separator=":")
yed.put('a:b:c', 'test')
self.assertTrue(yed.get('a:b:c') == 'test')
self.assertEqual(yed.get('a:b'), {'c': 'test'})
def test_adding_yaml_variable(self):
'''test adding a yaml variable as a value'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('z:y', '{{test}}')
self.assertTrue(yed.get('z:y') == '{{test}}')
def test_keys_with_underscore(self):
'''test keys that contain underscores'''
yed = Yedit("yedit_test.yml", separator=':')
yed.put('z_:y_y', {'test': '{{test}}'})
self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})
def test_first_level_array_update(self):
'''test update on top level array'''
yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
yed.update('', {'c': 4})
self.assertTrue({'c': 4} in yed.get(''))
def test_first_level_array_delete(self):
'''test remove top level key'''
yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
yed.delete('')
self.assertTrue({'b': 3} not in yed.get(''))
def test_first_level_array_get(self):
'''test get on a top level array'''
yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
yed.get('')
self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)
def test_pop_list_item(self):
'''test popping an item from a top level list'''
yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
yed.pop('', {'b': 2})
self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)
def test_pop_list_item_2(self):
'''test popping an item from a top level list of ints'''
z = list(range(10))
yed = Yedit(content=z, separator=':')
yed.pop('', 5)
z.pop(5)
self.assertTrue(z == yed.yaml_dict)
def test_pop_dict_key(self):
'''test popping a key from a nested dict'''
yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
yed.pop('a#b', 'c')
self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)
def test_accessing_path_with_unexpected_objects(self):
'''test providing source path objects that differ from current object state'''
yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
with self.assertRaises(YeditException):
yed.put('a.b.c.d', 'x')
def test_creating_new_objects_with_embedded_list(self):
'''test creating new objects with an embedded list in the creation path'''
yed = Yedit(content={'a': {'b': 12}})
with self.assertRaises(YeditException):
yed.put('new.stuff[0].here', 'value')
def test_creating_new_objects_with_trailing_list(self):
'''test creating new object(s) where the final piece is a list'''
yed = Yedit(content={'a': {'b': 12}})
with self.assertRaises(YeditException):
yed.put('new.stuff.here[0]', 'item')
def test_empty_key_with_int_value(self):
'''test putting a scalar value at the top level fails'''
yed = Yedit(content={'a': {'b': 12}})
result = yed.put('', 'b')
self.assertFalse(result[0])
def test_setting_separator(self):
'''test setting the separator'''
yed = Yedit(content={'a': {'b': 12}})
yed.separator = ':'
self.assertEqual(yed.separator, ':')
def test_remove_all(self):
'''test removing all data'''
data = Yedit.remove_entry({'a': {'b': 12}}, '')
self.assertTrue(data)
def test_remove_dict_entry(self):
'''test removing dict entry'''
data = {'a': {'b': [{'c': 3, 'd': 4, 'e': 5}]}}
results = Yedit.remove_entry(data, 'a.b[0].c')
self.assertTrue(results)
self.assertEqual(data, {'a': {'b': [{'d': 4, 'e': 5}]}})
def test_remove_dict_entry_top_all(self):
'''test removing dict entry top all'''
data = {'a': 1, 'b': 2}
results = Yedit.remove_entry(data, '')
self.assertTrue(results)
self.assertEqual(data, {})
def test_remove_dict_entry_top(self):
'''test removing dict entry top'''
data = {'a': 1, 'b': 2}
results = Yedit.remove_entry(data, '', value='b')
self.assertTrue(results)
self.assertEqual(data, {'a': 1})
def test_remove_list_entry(self):
'''test removing list entry'''
data = {'a': {'b': [{'c': 3}]}}
results = Yedit.remove_entry(data, 'a.b[0]')
self.assertTrue(results)
self.assertEqual(data, {'a': {'b': []}})
def test_remove_list_entry_value_top(self):
'''test removing top list entry'''
data = ['c', 'd', 'e']
results = Yedit.remove_entry(data, '', value='d')
self.assertTrue(results)
self.assertEqual(data, ['c', 'e'])
def test_remove_list_entry_index_top(self):
'''test removing top list entry'''
data = ['c', 'd', 'e']
results = Yedit.remove_entry(data, '', 2)
self.assertTrue(results)
self.assertEqual(data, ['c', 'd'])
def test_remove_list_entry_index(self):
'''test removing list entry 1 index'''
data = {'a': {'b': ['c', 'd', 'e']}}
results = Yedit.remove_entry(data, 'a.b[1]')
self.assertTrue(results)
self.assertEqual(data, {'a': {'b': ['c', 'e']}})
def test_parse_value_string_true(self):
'''test parse_value'''
results = Yedit.parse_value('true', 'str')
self.assertEqual(results, 'true')
def test_parse_value_bool_true(self):
'''test parse_value'''
results = Yedit.parse_value('true', 'bool')
self.assertTrue(results)
def test_parse_value_bool_exception(self):
'''test parse_value'''
with self.assertRaises(YeditException):
Yedit.parse_value('TTT', 'bool')
@mock.patch('yedit.Yedit.write')
def test_run_ansible_basic(self, mock_write):
'''test run_ansible with content only (no src file)'''
params = {
'src': None,
'backup': False,
'backup_ext': '',
'separator': '.',
'state': 'present',
'edits': [],
'value': None,
'key': None,
'content': {'a': {'b': {'c': 1}}},
'content_type': '',
}
mock_write.side_effect = [
(True, params['content']),
]
results = Yedit.run_ansible(params)
self.assertFalse(results['changed'])
@mock.patch('yedit.Yedit.write')
def test_run_ansible_and_write(self, mock_write):
'''test run_ansible with a src file'''
params = {
'src': '/tmp/test',
'backup': False,
'backup_ext': '',
'separator': '.',
'state': 'present',
'edits': [],
'value': None,
'key': None,
'content': {'a': {'b': {'c': 1}}},
'content_type': '',
}
mock_write.side_effect = [
(True, params['content']),
]
results = Yedit.run_ansible(params)
self.assertTrue(results['changed'])
def tearDown(self):
'''TearDown method'''
os.unlink(YeditTest.filename)
|
|
import logging
import uuid
import pytest
import requests
import test_helpers
from dcos_test_utils import marathon
__maintainer__ = 'kensipe'
__contact__ = 'orchestration-team@mesosphere.io'
log = logging.getLogger(__name__)
def deploy_test_app_and_check(dcos_api_session, app: dict, test_uuid: str):
"""This method deploys the test server app and then
pings its /operating_environment endpoint to retrieve the container
user running the task.
In a mesos container, this will be the marathon user.
In a docker container, this user comes from the USER setting
in the app's Dockerfile, which, for the test application,
is the default: root.
"""
expanded_config = test_helpers.get_expanded_config()
default_os_user = 'nobody' if expanded_config.get('security') == 'strict' else 'root'
if 'container' in app and app['container']['type'] == 'DOCKER':
marathon_user = 'root'
else:
marathon_user = app.get('user', default_os_user)
with dcos_api_session.marathon.deploy_and_cleanup(app):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app['id'])
r = requests.get('http://{}:{}/test_uuid'.format(service_points[0].host, service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
r_data = r.json()
assert r_data['test_uuid'] == test_uuid
r = requests.get('http://{}:{}/operating_environment'.format(
service_points[0].host,
service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
json_uid = r.json()['uid']
if marathon_user == 'root':
assert json_uid == 0, "App running as root should have uid 0."
else:
assert json_uid != 0, ("App running as {} should not have uid 0.".format(marathon_user))
def deploy_test_app_and_check_windows(dcos_api_session, app: dict, test_uuid: str):
"""This method deploys the python test server container and then checks
if the container is up and can accept connections.
"""
# Increase the timeout of the application to avoid failing while pulling the docker image
with dcos_api_session.marathon.deploy_and_cleanup(app, timeout=2400, ignore_failed_tasks=True):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app['id'])
r = requests.get('http://{}:{}'.format(service_points[0].host, service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
@pytest.mark.first
def test_docker_image_availability():
assert test_helpers.docker_pull_image("debian:jessie"), "docker pull failed for image used in the test"
def test_if_marathon_app_can_be_deployed(dcos_api_session):
"""Marathon app deployment integration test
This test verifies that a marathon app can be deployed, and that service points
returned by Marathon indeed point to the app that was deployed.
The application being deployed is a simple http server written in python.
Please see test_server.py for more details.
This is done by assigning a unique UUID to each app and passing it to the
docker container as an env variable. After successful deployment, the
"GET /test_uuid" request is issued to the app. If the returned UUID matches
the one assigned to the test, the test succeeds.
"""
deploy_test_app_and_check(dcos_api_session, *test_helpers.marathon_test_app())
def test_if_docker_app_can_be_deployed(dcos_api_session):
"""Marathon app inside docker deployment integration test.
Verifies that a marathon app inside of a docker daemon container can be
deployed and accessed as expected.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(
network=marathon.Network.BRIDGE,
container_type=marathon.Container.DOCKER,
container_port=9080))
@pytest.mark.supportedwindows
@pytest.mark.supportedwindowsonly
def test_if_docker_app_can_be_deployed_windows(dcos_api_session):
"""Marathon app inside docker deployment integration test.
Verifies that a marathon app inside of a docker daemon container can be
deployed and accessed as expected on Windows.
"""
deploy_test_app_and_check_windows(dcos_api_session, *test_helpers.marathon_test_app_windows())
@pytest.mark.parametrize('healthcheck', [
marathon.Healthcheck.HTTP,
marathon.Healthcheck.MESOS_HTTP,
])
def test_if_ucr_app_can_be_deployed(dcos_api_session, healthcheck):
"""Marathon app inside ucr deployment integration test.
Verifies that a marathon docker app inside of a ucr container can be
deployed and accessed as expected.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(
container_type=marathon.Container.MESOS,
healthcheck_protocol=healthcheck))
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(dcos_api_session):
"""Marathon app deployment integration test using the Mesos Containerizer
This test verifies that a Marathon app using the Mesos containerizer with
a Docker image can be deployed.
This is done by assigning a unique UUID to each app and passing it to the
docker container as an env variable. After successful deployment, the
"GET /test_uuid" request is issued to the app. If the returned UUID matches
the one assigned to the test, the test succeeds.
When port mapping is available (MESOS-4777), this test should be updated to
reflect that.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(container_type=marathon.Container.MESOS))
def test_if_marathon_pods_can_be_deployed_with_mesos_containerizer(dcos_api_session):
"""Marathon pods deployment integration test using the Mesos Containerizer
This test verifies that Marathon pods can be deployed.
"""
test_uuid = uuid.uuid4().hex
# create pod with trivial apps that function as long running processes
pod_definition = {
'id': '/integration-test-pods-{}'.format(test_uuid),
'scaling': {'kind': 'fixed', 'instances': 1},
'environment': {'PING': 'PONG'},
'containers': [
{
'name': 'ct1',
'resources': {'cpus': 0.1, 'mem': 32},
'image': {'kind': 'DOCKER', 'id': 'debian:jessie'},
'exec': {'command': {'shell': 'touch foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test -f foo'}}
},
{
'name': 'ct2',
'resources': {'cpus': 0.1, 'mem': 32},
'exec': {'command': {'shell': 'echo $PING > foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test $PING = `cat foo`'}}
}
],
'networks': [{'mode': 'host'}]
}
with dcos_api_session.marathon.deploy_pod_and_cleanup(pod_definition):
# Trivial app; if it deploys, there is nothing else to check
pass
def test_octarine(dcos_api_session, timeout=30):
expanded_config = test_helpers.get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('See: https://jira.mesosphere.com/browse/DCOS-14760')
# This app binds to port 80. This is only required by the http (not srv)
# transparent mode test. In transparent mode, we use ".mydcos.directory"
# to go to localhost, the port attached there is only used to
# determine which port to send traffic to on localhost. When it
# reaches the proxy, the port is not used, and a request is made
# to port 80.
app, uuid = test_helpers.marathon_test_app(host_port=80)
app['acceptedResourceRoles'] = ["slave_public"]
app['requirePorts'] = True
with dcos_api_session.marathon.deploy_and_cleanup(app):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app['id'])
port_number = service_points[0].port
# It didn't actually grab port 80 when requirePorts was unset
assert port_number == app['portDefinitions'][0]["port"]
app_name = app["id"].strip("/")
port_name = app['portDefinitions'][0]["name"]
port_protocol = app['portDefinitions'][0]["protocol"]
srv = "_{}._{}._{}.marathon.mesos".format(port_name, app_name, port_protocol)
addr = "{}.marathon.mesos".format(app_name)
transparent_suffix = ".mydcos.directory"
standard_mode = "standard"
transparent_mode = "transparent"
t_addr_bind = 2508
t_srv_bind = 2509
standard_addr = "{}:{}/ping".format(addr, port_number)
standard_srv = "{}/ping".format(srv)
transparent_addr = "{}{}:{}/ping".format(addr, transparent_suffix, t_addr_bind)
transparent_srv = "{}{}:{}/ping".format(srv, transparent_suffix, t_srv_bind)
# The uuids are different between runs so that they don't have a
# chance of colliding. They shouldn't anyways, but just to be safe.
octarine_runner(dcos_api_session, standard_mode, uuid + "1", standard_addr)
octarine_runner(dcos_api_session, standard_mode, uuid + "2", standard_srv)
octarine_runner(dcos_api_session, transparent_mode, uuid + "3", transparent_addr, bind_port=t_addr_bind)
octarine_runner(dcos_api_session, transparent_mode, uuid + "4", transparent_srv, bind_port=t_srv_bind)
def octarine_runner(dcos_api_session, mode, uuid, uri, bind_port=None):
log.info("Running octarine(mode={}, uuid={}, uri={}".format(mode, uuid, uri))
octarine = "/opt/mesosphere/bin/octarine"
bind_port_str = ""
if bind_port is not None:
bind_port_str = "-bindPort {}".format(bind_port)
server_cmd = "{} -mode {} {} {}".format(octarine, mode, bind_port_str, uuid)
log.info("Server: {}".format(server_cmd))
proxy = ('http://127.0.0.1:$({} --client --port {})'.format(octarine, uuid))
curl_cmd = '''"$(curl --fail --proxy {} {})"'''.format(proxy, uri)
expected_output = '''"$(printf "{\\n \\"pong\\": true\\n}")"'''
check_cmd = """sh -c '[ {} = {} ]'""".format(curl_cmd, expected_output)
log.info("Check: {}".format(check_cmd))
app, uuid = test_helpers.marathon_test_app()
app['requirePorts'] = True
app['cmd'] = server_cmd
app['healthChecks'] = [{
"protocol": "COMMAND",
"command": {"value": check_cmd},
'gracePeriodSeconds': 5,
'intervalSeconds': 10,
'timeoutSeconds': 10,
'maxConsecutiveFailures': 30
}]
with dcos_api_session.marathon.deploy_and_cleanup(app):
pass
|
|
r'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session on
one machine and debugging on another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
alternatively, this can be set with an environment variable from the command line:
set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
import json
import os.path
import sys
import traceback
_os_normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
rPath = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
traceback.print_exc()
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
# Converting json lists to tuple
PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]
# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
# (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
# r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
convert_to_long_pathname = lambda filename:filename
convert_to_short_pathname = lambda filename:filename
get_path_with_real_case = lambda filename:filename
if sys.platform == 'win32':
try:
import ctypes
from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD
GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetLongPathName.restype = DWORD
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD]
GetShortPathName.restype = DWORD
def _convert_to_long_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetLongPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _convert_to_short_pathname(filename):
buf = ctypes.create_unicode_buffer(MAX_PATH)
if IS_PY2 and isinstance(filename, str):
filename = filename.decode(getfilesystemencoding())
rv = GetShortPathName(filename, buf, MAX_PATH)
if rv != 0 and rv <= MAX_PATH:
filename = buf.value
if IS_PY2:
filename = filename.encode(getfilesystemencoding())
return filename
def _get_path_with_real_case(filename):
ret = convert_to_long_pathname(convert_to_short_pathname(filename))
# This doesn't handle the drive letter properly (it'll be unchanged).
# Make sure the drive letter is always uppercase.
if len(ret) > 1 and ret[1] == ':' and ret[0].islower():
return ret[0].upper() + ret[1:]
return ret
# Check that it actually works
_get_path_with_real_case(__file__)
except:
# Something didn't quite work out, leave no-op conversions in place.
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
traceback.print_exc()
else:
convert_to_long_pathname = _convert_to_long_pathname
convert_to_short_pathname = _convert_to_short_pathname
get_path_with_real_case = _get_path_with_real_case
elif IS_JYTHON and IS_WINDOWS:
def get_path_with_real_case(filename):
from java.io import File
f = File(filename)
ret = f.getCanonicalPath()
if IS_PY2 and not isinstance(ret, str):
return ret.encode(getfilesystemencoding())
return ret
if IS_WINDOWS:
if IS_JYTHON:
def normcase(filename):
return filename.lower()
else:
def normcase(filename):
# `normcase` doesn't lower case on Python 2 for non-English locale, but Java
# side does it, so we should do it manually.
if '~' in filename:
filename = convert_to_long_pathname(filename)
filename = _os_normcase(filename)
return filename.lower()
else:
def normcase(filename):
return filename # no-op
_ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX'
def set_ide_os(os):
'''
We need to set the IDE os because the host where the code is running may
actually be different from the client (and the point is that we want the proper
paths to translate from the client to the server).
:param os:
'UNIX' or 'WINDOWS'
'''
global _ide_os
prev = _ide_os
if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116)
os = 'WINDOWS'
assert os in ('WINDOWS', 'UNIX')
if prev != os:
_ide_os = os
# We need to (re)setup how the client <-> server translation works to provide proper separators.
setup_client_server_paths(_last_client_server_paths_set)
DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true')
# Caches filled as requested during the debug session.
NORM_PATHS_CONTAINER = {}
NORM_PATHS_AND_BASE_CONTAINER = {}
def _NormFile(filename):
abs_path, real_path = _NormPaths(filename)
return real_path
def _AbsFile(filename):
abs_path, real_path = _NormPaths(filename)
return abs_path
# Returns tuple of absolute path and real path for given filename
def _NormPaths(filename):
try:
return NORM_PATHS_CONTAINER[filename]
except KeyError:
if filename.__class__ != str:
raise AssertionError('Paths passed to _NormPaths must be str. Found: %s (%s)' % (filename, type(filename)))
abs_path = _NormPath(filename, os.path.abspath)
real_path = _NormPath(filename, rPath)
# cache it for fast access later
NORM_PATHS_CONTAINER[filename] = abs_path, real_path
return abs_path, real_path
def _NormPath(filename, normpath):
r = normpath(filename)
ind = r.find('.zip')
if ind == -1:
ind = r.find('.egg')
if ind != -1:
ind += 4
zip_path = r[:ind]
inner_path = r[ind:]
if inner_path.startswith('!'):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with 'exists'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
if inner_path:
r = join(normcase(zip_path), inner_path)
return r
r = normcase(r)
return r
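# Illustrative behaviour of _NormPath for archive paths (hypothetical path): an input such
# as '/deps/lib.egg/pkg/mod.py' is split at the '.egg' boundary into zip_path='/deps/lib.egg'
# and inner_path='pkg/mod.py'; only zip_path is passed through normcase before the two are
# re-joined, so the path inside the archive keeps its original case.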
_ZIP_SEARCH_CACHE = {}
_NOT_FOUND_SENTINEL = object()
def exists(file):
if os.path.exists(file):
return file
ind = file.find('.zip')
if ind == -1:
ind = file.find('.egg')
if ind != -1:
ind += 4
zip_path = file[:ind]
inner_path = file[ind:]
if inner_path.startswith("!"):
# Note (fabioz): although I can replicate this by creating a file ending as
# .zip! or .egg!, I don't really know what's the real-world case for this
# (still kept as it was added by @jetbrains, but it should probably be reviewed
# later on).
# Note 2: it goes hand-in-hand with '_NormPath'.
inner_path = inner_path[1:]
zip_path = zip_path + '!'
zip_file_obj = _ZIP_SEARCH_CACHE.get(zip_path, _NOT_FOUND_SENTINEL)
if zip_file_obj is None:
return False
elif zip_file_obj is _NOT_FOUND_SENTINEL:
try:
import zipfile
zip_file_obj = zipfile.ZipFile(zip_path, 'r')
_ZIP_SEARCH_CACHE[zip_path] = zip_file_obj
except:
_ZIP_SEARCH_CACHE[zip_path] = _NOT_FOUND_SENTINEL
return False
try:
if inner_path.startswith('/') or inner_path.startswith('\\'):
inner_path = inner_path[1:]
_info = zip_file_obj.getinfo(inner_path.replace('\\', '/'))
return join(zip_path, inner_path)
except KeyError:
return None
return None
# Now, let's do a quick test to see if we're working with a version of python that has no problems
# related to the names generated...
try:
try:
code = rPath.func_code
except AttributeError:
code = rPath.__code__
if not exists(_NormFile(code.co_filename)):
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.flush()
NORM_SEARCH_CACHE = {}
initial_norm_paths = _NormPaths
def _NormPaths(filename): # Let's redefine _NormPaths to work with paths that may be incorrect
try:
return NORM_SEARCH_CACHE[filename]
except KeyError:
abs_path, real_path = initial_norm_paths(filename)
if not exists(real_path):
# We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
for path in sys.path:
abs_path, real_path = initial_norm_paths(join(path, filename))
if exists(real_path):
break
else:
sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
abs_path = filename
real_path = filename
NORM_SEARCH_CACHE[filename] = abs_path, real_path
return abs_path, real_path
except:
# Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
traceback.print_exc()
# Note: as these functions may be rebound, users should always import
# pydevd_file_utils and then use:
#
# pydevd_file_utils.norm_file_to_client
# pydevd_file_utils.norm_file_to_server
#
# instead of importing any of those names to a given scope.
def _original_file_to_client(filename, cache={}):
try:
return cache[filename]
except KeyError:
cache[filename] = get_path_with_real_case(_AbsFile(filename))
return cache[filename]
_original_file_to_server = _NormFile
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
def _fix_path(path, sep):
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
if sep != '/':
path = path.replace('/', sep)
return path
_last_client_server_paths_set = []
def setup_client_server_paths(paths):
'''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON'''
global norm_file_to_client
global norm_file_to_server
global _last_client_server_paths_set
_last_client_server_paths_set = paths[:]
# Work on the client and server slashes.
python_sep = '\\' if IS_WINDOWS else '/'
eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/'
norm_filename_to_server_container = {}
norm_filename_to_client_container = {}
initial_paths = list(paths)
paths_from_eclipse_to_python = initial_paths[:]
# Apply normcase to the existing paths to follow the os preferences.
for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]):
if IS_PY2:
if isinstance(path0, unicode):
path0 = path0.encode(sys.getfilesystemencoding())
if isinstance(path1, unicode):
path1 = path1.encode(sys.getfilesystemencoding())
path0 = _fix_path(path0, eclipse_sep)
path1 = _fix_path(path1, python_sep)
initial_paths[i] = (path0, path1)
paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1))
if not paths_from_eclipse_to_python:
# no translation step needed (just inline the calls)
norm_file_to_client = _original_file_to_client
norm_file_to_server = _original_file_to_server
return
# only setup translation functions if absolutely needed!
def _norm_file_to_server(filename, cache=norm_filename_to_server_container):
# Eclipse will send the passed filename to be translated to the python process
# So, this would be 'NormFileFromEclipseToPython'
try:
return cache[filename]
except KeyError:
if eclipse_sep != python_sep:
# Make sure that the separators are what we expect from the IDE.
filename = filename.replace(python_sep, eclipse_sep)
# used to translate a path from the client to the debug server
translated = normcase(filename)
for eclipse_prefix, server_prefix in paths_from_eclipse_to_python:
if translated.startswith(eclipse_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
translated = translated.replace(eclipse_prefix, server_prefix)
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[0] for x in paths_from_eclipse_to_python]))
# Note that when going to the server, we do the replace first and only later do the norm file.
if eclipse_sep != python_sep:
translated = translated.replace(eclipse_sep, python_sep)
translated = _NormFile(translated)
cache[filename] = translated
return translated
def _norm_file_to_client(filename, cache=norm_filename_to_client_container):
# The result of this method will be passed to eclipse
# So, this would be 'NormFileFromPythonToEclipse'
try:
return cache[filename]
except KeyError:
# used to translate a path from the debug server to the client
translated = _NormFile(filename)
# After getting the real path, let's get it with the path with
# the real case and then obtain a new normalized copy, just in case
# the path is different now.
translated_proper_case = get_path_with_real_case(translated)
translated = _NormFile(translated_proper_case)
if IS_WINDOWS:
if translated.lower() != translated_proper_case.lower():
translated_proper_case = translated
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write(
'pydev debugger: _NormFile changed path (from: %s to %s)\n' % (
translated_proper_case, translated))
for i, (eclipse_prefix, python_prefix) in enumerate(paths_from_eclipse_to_python):
if translated.startswith(python_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
# Note: use the non-normalized version.
eclipse_prefix = initial_paths[i][0]
translated = eclipse_prefix + translated_proper_case[len(python_prefix):]
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[1] for x in paths_from_eclipse_to_python]))
translated = translated_proper_case
if eclipse_sep != python_sep:
translated = translated.replace(python_sep, eclipse_sep)
# The resulting path is not in the python process, so, we cannot do a _NormFile here,
# only at the beginning of this method.
cache[filename] = translated
return translated
norm_file_to_server = _norm_file_to_server
norm_file_to_client = _norm_file_to_client
setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON)
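# Illustrative runtime setup (hypothetical paths): besides the PATHS_FROM_ECLIPSE_TO_PYTHON
# environment variable, the translation can be (re)configured programmatically, e.g.
#     setup_client_server_paths([(r'c:\my_project\src', r'/user/projects/my_project/src')])
# after which norm_file_to_client()/norm_file_to_server() translate matching prefixes in both
# directions (and fall back to the plain path normalizers when no prefix matches).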
# For given file f returns tuple of its absolute path, real path and base name
def get_abs_path_real_path_and_base_from_file(f):
try:
return NORM_PATHS_AND_BASE_CONTAINER[f]
except:
abs_path, real_path = _NormPaths(f)
base = basename(real_path)
ret = abs_path, real_path, base
NORM_PATHS_AND_BASE_CONTAINER[f] = ret
return ret
def get_abs_path_real_path_and_base_from_frame(frame):
try:
return NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
# This one is just internal (so, does not need any kind of client-server translation)
f = frame.f_code.co_filename
if f is not None and f.startswith (('build/bdist.', 'build\\bdist.')):
# files from eggs in Python 2.7 have paths like build/bdist.linux-x86_64/egg/<path-inside-egg>
f = frame.f_globals['__file__']
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
ret = get_abs_path_real_path_and_base_from_file(f)
# Also cache based on the frame.f_code.co_filename (if we had it inside build/bdist it can make a difference).
NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename] = ret
return ret
def get_fullname(mod_name):
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def get_package_dir(mod_name):
for path in sys.path:
mod_path = join(path, mod_name)
if os.path.isdir(mod_path):
return mod_path
return None
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from colorama import Fore, Style
import os
import sys
import errno
import re
import math
def print_str(args, _str):
"""print without newline"""
if not args.has_log:
sys.stdout.write(_str)
sys.stdout.flush()
def norm_path(path):
"""normalize path"""
return os.path.normpath(os.path.realpath(path))
# borrowed from AndersTornkvist's fork
def escape_filename_part(part):
"""escape possible offending characters"""
part = re.sub(r"\s*/\s*", r' & ', part)
part = re.sub(r"""\s*[\\/:"*?<>|]+\s*""", r' ', part)
part = part.strip()
part = re.sub(r"(^\.+\s*|(?<=\.)\.+|\s*\.+$)", r'', part)
return part
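# Illustrative example (hypothetical title): slashes become ' & ', other offending
# characters collapse to a single space, and surrounding whitespace/dots are stripped:
#     escape_filename_part('AC/DC: Back in Black?') == 'AC & DC Back in Black'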
def to_ascii(args, _str, on_error='ignore'):
"""convert unicode to ascii if necessary"""
# python 3 renamed unicode to str
if sys.version_info >= (3, 0):
if isinstance(_str, bytes) and not args.ascii:
return str(_str, "utf-8")
elif isinstance(_str, str) and args.ascii:
return _str.encode("ascii", on_error).decode("utf-8")
else:
return _str
else:
if isinstance(_str, str) and not args.ascii:
return unicode(_str, "utf-8")
elif isinstance(_str, unicode) and args.ascii:
return _str.encode("ascii", on_error).decode("utf-8")
else:
return _str
def rm_file(file_name):
try:
os.remove(file_name)
except OSError as e:
# don't need to print a warning if the file doesn't exist
if e.errno != errno.ENOENT:
print(
Fore.YELLOW + "Warning: error while trying to remove file " +
file_name + Fore.RESET)
print(str(e))
def default_settings_dir():
return norm_path(os.path.join(os.path.expanduser("~"), ".spotify-ripper"))
def settings_dir(args):
return norm_path(args.settings[0]) if args.settings is not None \
else default_settings_dir()
def base_dir(args):
return norm_path(args.directory[0]) if args.directory is not None \
else os.getcwd()
def calc_file_size(args, track):
return (int(args.quality) / 8) * track.duration
# returns path of executable
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
KB_BYTES = 1024
'''Number of bytes per KB (2^10)'''
MB_BYTES = 1048576
'''Number of bytes per MB (2^20)'''
GB_BYTES = 1073741824
'''Number of bytes per GB (2^30)'''
KB_UNIT = "KB"
'''Kilobytes abbreviation'''
MB_UNIT = "MB"
'''Megabytes abbreviation'''
GB_UNIT = "GB"
'''Gigabytes abbreviation'''
# borrowed from eyeD3
def format_size(size, short=False):
"""Format ``size`` (number of bytes) into string format doing KB, MB, or GB
conversion where necessary.
When ``short`` is False (the default) the value is formatted with a unit
ranging from bytes up to gigabytes, e.g. '234.00 GB'.
The short version is 2-4 characters long and of the form
256b
64k
1.1G
"""
if not short:
unit = "Bytes"
if size >= GB_BYTES:
size = float(size) / float(GB_BYTES)
unit = GB_UNIT
elif size >= MB_BYTES:
size = float(size) / float(MB_BYTES)
unit = MB_UNIT
elif size >= KB_BYTES:
size = float(size) / float(KB_BYTES)
unit = KB_UNIT
return "%.2f %s" % (size, unit)
else:
suffixes = u' kMGTPEH'
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 7:
suffix = '?'
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = size / num_scale
str_value = str(value)
if len(str_value) >= 3 and str_value[2] == '.':
str_value = str_value[:2]
else:
str_value = str_value[:3]
return "{0:>3s}{1}".format(str_value, suffix)
# borrowed from eyeD3
def format_time(seconds, total=None, short=False):
"""
Format ``seconds`` (number of seconds) as a string representation.
When ``short`` is False (the default) the format is:
HH:MM:SS.
Otherwise, the format is exactly 6 characters long and of the form:
1w 3d
2d 4h
1h 5m
1m 4s
15s
If ``total`` is not None it will also be formatted and
appended to the result separated by ' / '.
"""
def time_tuple(ts):
if ts is None or ts < 0:
ts = 0
hours = ts / 3600
mins = (ts % 3600) / 60
secs = (ts % 3600) % 60
tstr = '%02d:%02d' % (mins, secs)
if int(hours):
tstr = '%02d:%s' % (hours, tstr)
return (int(hours), int(mins), int(secs), tstr)
if not short:
hours, mins, secs, curr_str = time_tuple(seconds)
retval = curr_str
if total:
hours, mins, secs, total_str = time_tuple(total)
retval += ' / %s' % total_str
return retval
else:
units = [
(u'y', 60 * 60 * 24 * 7 * 52),
(u'w', 60 * 60 * 24 * 7),
(u'd', 60 * 60 * 24),
(u'h', 60 * 60),
(u'm', 60),
(u's', 1),
]
seconds = int(seconds)
if seconds < 60:
return u'00m {0:02d}s'.format(seconds)
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return u'{0:02d}{1} {2:02d}{3}'.format(
seconds // limit1, unit1,
(seconds % limit1) // limit2, unit2)
return u' ~inf'
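# Minimal usage sketch for format_time (illustrative values; only runs when
# this module is executed directly):
if __name__ == "__main__":
    print(format_time(125))              # "02:05"
    print(format_time(125, total=3600))  # "02:05 / 01:00:00"
    print(format_time(125, short=True))  # "02m 05s"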
|
|
import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from cuttsum.classifiers import NuggetRegressor
import cuttsum.judgements
import pandas as pd
import numpy as np
from datetime import datetime
from cuttsum.misc import event2semsim
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
import os
def epoch(dt):
return int((dt - datetime(1970, 1, 1)).total_seconds())
matches_df = cuttsum.judgements.get_merged_dataframe()
def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20):
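# Builds the per-sentence input stream for one event: loads the extracted
# sentence dataframes, attaches gold or regressor-predicted nugget
# probabilities, keeps only nuggets that had already appeared by each
# sentence's timestamp (at most max_nuggets per sentence), and restricts the
# stream to documents whose stream id is flagged as a match by the dedup
# resource.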
max_nuggets = 3
corpus = cuttsum.corpora.get_raw_corpus(event)
res = InputStreamResource()
df = pd.concat(
res.get_dataframes(event, corpus, extractor, thresh, delay, topk))
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0
if gold_probs is True:
df["probs"] = df["true probs"]
else:
df["probs"] = NuggetRegressor().predict(event, df)
df["nuggets"] = df["nugget probs"].apply(
lambda x: set([key for key, val in x.items() if val > .9]))
nid2time = {}
nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
for nid in nids:
ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
ts.sort()
nid2time[nid] = ts[0]
fltr_nuggets = []
for name, row in df.iterrows():
fltr_nuggets.append(
set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
#print df[["nuggets", "timestamp"]].apply(lambda y: print y[0]) # datetime.utcfromtimestamp(int(y["timestamp"])))
#print nids
df["nuggets"] = fltr_nuggets
df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
from cuttsum.pipeline import DedupedArticlesResource
ded = DedupedArticlesResource()
stats_df = ded.get_stats_df(event, corpus, extractor, thresh)
stats_df["stream ids"] = stats_df["stream ids"].apply(lambda x: set(eval(x)))
sid2match = {}
for _, row in stats_df.iterrows():
for sid in row["stream ids"]:
sid2match[sid] = row["match"]
all_ts = []
all_docs = []
new_docs = []
for (sid, ts), doc in df.groupby(["stream id", "timestamp"]):
# print sub_doc
if len(all_ts) > 0:
assert ts >= all_ts[-1]
all_ts.append(ts)
if sid2match[sid] is True:
new_docs.append(doc)
all_docs.append(doc)
df = pd.concat(new_docs)
print len(all_docs), len(new_docs)
return df
def main(output_dir, sim_threshold, bucket_size, pref_offset):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dev_qids = set([19, 23, 27, 34, 35] + [7, 24])
summary_data = []
K_data = []
for event in cuttsum.events.get_events():
if event.query_num in dev_qids: continue
print event
semsim = event2semsim(event)
istream = get_input_stream(event, False, extractor="goose",
thresh=.8, delay=None, topk=20)
prev_time = 0
cache = None
clusters = []
max_h = len(event.list_event_hours()) - 1
for h, hour in enumerate(event.list_event_hours()):
if h % bucket_size != 0 and h != max_h:
continue
current_time = epoch(hour)
input_sents = istream[
(istream["timestamp"] < current_time) & \
(istream["timestamp"] >= prev_time)]
len_select = input_sents["lemmas stopped"].apply(len) > 10
input_sents = input_sents[len_select]
if len(input_sents) <= 1: continue
stems = input_sents["stems"].apply(lambda x: ' '.join(x)).tolist()
X = semsim.transform(stems)
probs = input_sents["probs"]
p = probs.values
K = -(1 - cosine_similarity(X))
K_ma = np.ma.masked_array(K, np.eye(K.shape[0]))
Kmin = np.ma.min(K_ma)
Kmax = np.ma.max(K_ma)
median = np.ma.median(K_ma)[0]
pref = np.minimum(p + median, -.05)
print "SYS TIME:", hour, "# SENTS:", K.shape[0],
print "min/median/max pref: {}/{}/{}".format(
pref.min(), np.median(pref), pref.max())
#K_data.append({"min": Kmin, "max": Kmax, "median": median})
K_data.append({"min": (pref).min(), "max": (pref).max(),
"median": np.median((pref))})
#print K
# continue
#
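# Cluster the sentence vectors with affinity propagation over the precomputed
# (negative cosine distance) similarity matrix K; lowering the preferences
# (pref - pref_offset) yields fewer exemplars, i.e. a smaller per-bucket
# summary.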
ap = AffinityPropagation(preference=pref-pref_offset, affinity="precomputed",
verbose=True, max_iter=50000)
ap.fit(K)
# ##print input_sents["pretty text"]
#
labels = ap.labels_
if ap.cluster_centers_indices_ is not None:
for c in ap.cluster_centers_indices_:
if cache is None:
cache = X[c]
updates_df = input_sents.reset_index(
drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T)
else:
Ksum = cosine_similarity(cache, X[c])
#print "MAX SIM", Ksum.max()
#print input_sents.reset_index(drop=True).iloc[c]["sent text"]
if Ksum.max() < sim_threshold:
cache = np.vstack([cache, X[c]])
updates_df = input_sents.reset_index(
drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T)
#
# for l, i in enumerate(af.cluster_centers_indices_):
# support = np.sum(labels == l)
# center = input_sents.iloc[i][["update id", "sent text", "pretty text", "stems", "nuggets"]]
# center = center.to_dict()
# center["support"] = support
# center["timestamp"] = current_time
# clusters.append(center)
#
prev_time = current_time
# df = pd.DataFrame(clusters, columns=["update id", "timestamp", "support", "sent text", "pretty text", "stems", "nuggets"])
#
# import os
# dirname = "clusters"
# if not os.path.exists(dirname):
# os.makedirs(dirname)
#
# with open(os.path.join(dirname, "{}.tsv".format(event.query_id)), "w") as f:
# df.to_csv(f, sep="\t", index=False)
#
df = pd.DataFrame(K_data, columns=["min", "max", "median"])
print df
print df.mean()
print df.std()
print df.max()
df = pd.concat(summary_data)
df["conf"] = .5
df["team id"] = "APSAL"
df["run id"] = "sim{}_bs{}_off{}".format(
sim_threshold, bucket_size, pref_offset)
print df
of = os.path.join(output_dir, "apsal" + "sim{}_bs{}_off{}.tsv".format(
sim_threshold, bucket_size, pref_offset))
cols = ["query id", "team id", "run id", "stream id", "sent id",
"system timestamp", "conf"]
df[cols].to_csv(of, sep="\t", header=False, index=False)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(u"--output-dir", type=str,
required=True, help="directory to write results.")
parser.add_argument(
u"--sim-cutoff", type=float, required=True)
parser.add_argument(
u"--bucket-size", type=float, required=True)
parser.add_argument(
u"--pref-offset", type=float, required=True)
args = parser.parse_args()
main(args.output_dir, args.sim_cutoff,
args.bucket_size, args.pref_offset)
|
|
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the MongoDB storage controller for queues.
Field Mappings:
In order to reduce the disk / memory space used,
field names will be, most of the time, the first
letter of their long name.
"""
import pymongo.errors
from marconi.openstack.common.gettextutils import _
import marconi.openstack.common.log as logging
from marconi.openstack.common import timeutils
from marconi.queues import storage
from marconi.queues.storage import errors
from marconi.queues.storage.mongodb import utils
LOG = logging.getLogger(__name__)
class QueueController(storage.Queue):
"""Implements queue resource operations using MongoDB.
Queues are scoped by project, which is prefixed to the
queue name.
Queues:
Name Field
---------------------
name -> p_q
msg counter -> c
metadata -> m
Message Counter:
Name Field
-------------------
value -> v
modified ts -> t
"""
def __init__(self, *args, **kwargs):
super(QueueController, self).__init__(*args, **kwargs)
self._collection = self.driver.queues_database.queues
# NOTE(flaper87): This creates a unique index for
# project and name. Using project as the prefix
# allows for querying by project and project+name.
# This is also useful for retrieving the queues list for
# a specific project, for example. Order matters!
self._collection.ensure_index([('p_q', 1)], unique=True)
#-----------------------------------------------------------------------
# Helpers
#-----------------------------------------------------------------------
def _get(self, name, project=None, fields={'m': 1, '_id': 0}):
queue = self._collection.find_one(_get_scoped_query(name, project),
fields=fields)
if queue is None:
raise errors.QueueDoesNotExist(name, project)
return queue
def _get_counter(self, name, project=None):
"""Retrieves the current message counter value for a given queue.
This helper is used to generate monotonic pagination
markers that are saved as part of the message
document.
Note 1: Markers are scoped per-queue and so are *not*
globally unique or globally ordered.
Note 2: If two or more requests to this method are made
in parallel, this method will return the same counter
value. This is done intentionally so that the caller
can detect a parallel message post, allowing it to
mitigate race conditions between producer and
observer clients.
:param name: Name of the queue to which the counter is scoped
:param project: Queue's project
:returns: current message counter as an integer
"""
doc = self._collection.find_one(_get_scoped_query(name, project),
fields={'c.v': 1, '_id': 0})
if doc is None:
raise errors.QueueDoesNotExist(name, project)
return doc['c']['v']
def _inc_counter(self, name, project=None, amount=1, window=None):
"""Increments the message counter and returns the new value.
:param name: Name of the queue to which the counter is scoped
:param project: Queue's project name
:param amount: (Default 1) Amount by which to increment the counter
:param window: (Default None) A time window, in seconds, that
must have elapsed since the counter was last updated, in
order to increment the counter.
:returns: Updated message counter value, or None if window
was specified, and the counter has already been updated
within the specified time period.
:raises: storage.errors.QueueDoesNotExist
"""
now = timeutils.utcnow_ts()
update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
query = _get_scoped_query(name, project)
if window is not None:
threshold = now - window
query['c.t'] = {'$lt': threshold}
while True:
try:
doc = self._collection.find_and_modify(
query, update, new=True, fields={'c.v': 1, '_id': 0})
break
except pymongo.errors.AutoReconnect as ex:
LOG.exception(ex)
if doc is None:
if window is None:
# NOTE(kgriffs): Since we did not filter by a time window,
# the queue should have been found and updated. Perhaps
# the queue has been deleted?
message = _(u'Failed to increment the message '
u'counter for queue %(name)s and '
u'project %(project)s')
message %= dict(name=name, project=project)
LOG.warning(message)
raise errors.QueueDoesNotExist(name, project)
# NOTE(kgriffs): Assume the queue existed, but the counter
# was recently updated, causing the range query on 'c.t' to
# exclude the record.
return None
return doc['c']['v']
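# For example, a caller that wants at most one counter bump per 10 seconds
# can call _inc_counter(name, project, window=10) and treat a None return
# value as "another producer already advanced the counter recently".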
#-----------------------------------------------------------------------
# Interface
#-----------------------------------------------------------------------
def list(self, project=None, marker=None,
limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False):
query = utils.scoped_query(marker, project)
fields = {'p_q': 1, '_id': 0}
if detailed:
fields['m'] = 1
cursor = self._collection.find(query, fields=fields)
cursor = cursor.limit(limit).sort('p_q')
marker_name = {}
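# This generator yields twice: first the queue records, wrapped in a
# HookedCursor that applies `normalizer` lazily, then the marker for the next
# page. `marker_name` is mutated by `normalizer` as the caller consumes the
# cursor, so the second yield sees the last queue name returned.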
def normalizer(record):
queue = {'name': utils.descope_queue_name(record['p_q'])}
marker_name['next'] = queue['name']
if detailed:
queue['metadata'] = record['m']
return queue
yield utils.HookedCursor(cursor, normalizer)
yield marker_name and marker_name['next']
@utils.raises_conn_error
@utils.retries_on_autoreconnect
def get_metadata(self, name, project=None):
queue = self._get(name, project)
return queue.get('m', {})
@utils.raises_conn_error
# @utils.retries_on_autoreconnect
def create(self, name, project=None):
# NOTE(flaper87): If the connection fails after it was called
# and we retry to insert the queue, we could end up returning
# `False` because of the `DuplicatedKeyError` although the
# queue was indeed created by this API call.
#
# TODO(kgriffs): Commented out `retries_on_autoreconnect` for
# now due to the above issue, since creating a queue is less
# important to make super HA.
try:
# NOTE(kgriffs): Start counting at 1, and assume the first
# message ever posted will succeed and set t to a UNIX
# "modified at" timestamp.
counter = {'v': 1, 't': 0}
scoped_name = utils.scope_queue_name(name, project)
self._collection.insert({'p_q': scoped_name, 'm': {},
'c': counter})
except pymongo.errors.DuplicateKeyError:
return False
else:
return True
@utils.raises_conn_error
@utils.retries_on_autoreconnect
def exists(self, name, project=None):
query = _get_scoped_query(name, project)
return self._collection.find_one(query) is not None
@utils.raises_conn_error
@utils.retries_on_autoreconnect
def set_metadata(self, name, metadata, project=None):
rst = self._collection.update(_get_scoped_query(name, project),
{'$set': {'m': metadata}},
multi=False,
manipulate=False)
if not rst['updatedExisting']:
raise errors.QueueDoesNotExist(name, project)
@utils.raises_conn_error
@utils.retries_on_autoreconnect
def delete(self, name, project=None):
self.driver.message_controller._purge_queue(name, project)
self._collection.remove(_get_scoped_query(name, project))
@utils.raises_conn_error
@utils.retries_on_autoreconnect
def stats(self, name, project=None):
if not self.exists(name, project=project):
raise errors.QueueDoesNotExist(name, project)
controller = self.driver.message_controller
active = controller._count(name, project=project,
include_claimed=False)
total = controller._count(name, project=project,
include_claimed=True)
message_stats = {
'claimed': total - active,
'free': active,
'total': total,
}
try:
oldest = controller.first(name, project=project, sort=1)
newest = controller.first(name, project=project, sort=-1)
except errors.QueueIsEmpty:
pass
else:
now = timeutils.utcnow_ts()
message_stats['oldest'] = utils.stat_message(oldest, now)
message_stats['newest'] = utils.stat_message(newest, now)
return {'messages': message_stats}
def _get_scoped_query(name, project):
return {'p_q': utils.scope_queue_name(name, project)}
|
|
#!/usr/bin/env python3
from fontTools import ttLib
import os
import sys
__doc__ = '''\
Prints all possible kerning pairs within a font.
Supports RTL kerning.
Usage:
------
python getKerningPairsFromOTF.py <path to font file>
'''
kKernFeatureTag = 'kern'
kGPOStableName = 'GPOS'
finalList = []
class myLeftClass:
def __init__(self):
self.glyphs = []
self.class1Record = 0
class myRightClass:
def __init__(self):
self.glyphs = []
self.class2Record = 0
def collect_unique_kern_lookup_indexes(featureRecord):
unique_kern_lookups = []
for featRecItem in featureRecord:
# print(featRecItem.FeatureTag)
# GPOS feature tags (e.g. kern, mark, mkmk, size) of each ScriptRecord
if featRecItem.FeatureTag == kKernFeatureTag:
feature = featRecItem.Feature
for featLookupItem in feature.LookupListIndex:
if featLookupItem not in unique_kern_lookups:
unique_kern_lookups.append(featLookupItem)
return unique_kern_lookups
class OTFKernReader(object):
def __init__(self, fontPath):
self.font = ttLib.TTFont(fontPath)
self.kerningPairs = {}
self.singlePairs = {}
self.classPairs = {}
self.pairPosList = []
self.allLeftClasses = {}
self.allRightClasses = {}
if kGPOStableName not in self.font:
print("The font has no %s table" % kGPOStableName, file=sys.stderr)
self.goodbye()
else:
self.analyzeFont()
self.findKerningLookups()
self.getPairPos()
self.getSinglePairs()
self.getClassPairs()
def goodbye(self):
print('The fun ends here.', file=sys.stderr)
return
def analyzeFont(self):
self.gposTable = self.font[kGPOStableName].table
self.scriptList = self.gposTable.ScriptList
self.featureList = self.gposTable.FeatureList
self.featureCount = self.featureList.FeatureCount
self.featureRecord = self.featureList.FeatureRecord
self.unique_kern_lookups = collect_unique_kern_lookup_indexes(
self.featureRecord)
def findKerningLookups(self):
if not len(self.unique_kern_lookups):
print(
"The font has no %s feature." % kKernFeatureTag,
file=sys.stderr)
self.goodbye()
self.lookup_list = self.gposTable.LookupList
self.lookups = []
for kern_lookup_index in sorted(self.unique_kern_lookups):
lookup = self.lookup_list.Lookup[kern_lookup_index]
# Confirm this is a GPOS LookupType 2; or
# using an extension table (GPOS LookupType 9):
'''
Lookup types:
1 Single adjustment Adjust position of a single glyph
2 Pair adjustment Adjust position of a pair of glyphs
3 Cursive attachment Attach cursive glyphs
4 MarkToBase attachment Attach a combining mark to a base glyph
5 MarkToLigature attachment Attach a combining mark to a ligature
6 MarkToMark attachment Attach a combining mark to another mark
7 Context positioning Position one or more glyphs in context
8 Chained Context positioning Position one or more glyphs in chained context
9 Extension positioning Extension mechanism for other positionings
10+ Reserved for future use
'''
if lookup.LookupType not in [2, 9]:
print('''
Info: GPOS LookupType %s found.
This type is neither a pair adjustment positioning lookup (GPOS LookupType 2)
nor an extension table (GPOS LookupType 9), which are the only supported types.
''' % lookup.LookupType, file=sys.stderr)
continue
self.lookups.append(lookup)
def getPairPos(self):
for lookup in self.lookups:
for subtableItem in lookup.SubTable:
if subtableItem.LookupType == 9: # extension table
if subtableItem.ExtensionLookupType == 8: # contextual
print(
'Contextual Kerning not (yet?) supported.',
file=sys.stderr)
continue
elif subtableItem.ExtensionLookupType == 2:
subtableItem = subtableItem.ExtSubTable
if subtableItem.Coverage.Format not in [1, 2]:
print(
'WARNING: Coverage format %d '
'is not yet supported.' % subtableItem.Coverage.Format,
file=sys.stderr)
if subtableItem.ValueFormat1 not in [0, 4, 5]:
print(
'WARNING: ValueFormat1 format %d '
'is not yet supported.' % subtableItem.ValueFormat1,
file=sys.stderr)
if subtableItem.ValueFormat2 not in [0]:
print(
'WARNING: ValueFormat2 format %d '
'is not yet supported.' % subtableItem.ValueFormat2,
file=sys.stderr)
self.pairPosList.append(subtableItem)
# Each glyph in this list will have a corresponding PairSet
# which will contain all the second glyphs and the kerning
# value in the form of PairValueRecord(s)
# self.firstGlyphsList.extend(subtableItem.Coverage.glyphs)
def getSinglePairs(self):
for pairPos in self.pairPosList:
if pairPos.Format == 1:
# single pair adjustment
firstGlyphsList = pairPos.Coverage.glyphs
# This iteration is done by index so we have a way
# to reference the firstGlyphsList:
for ps_index, _ in enumerate(pairPos.PairSet):
for pairValueRecordItem in pairPos.PairSet[ps_index].PairValueRecord:
secondGlyph = pairValueRecordItem.SecondGlyph
valueFormat = pairPos.ValueFormat1
if valueFormat == 5: # RTL kerning
kernValue = "<%d 0 %d 0>" % (
pairValueRecordItem.Value1.XPlacement,
pairValueRecordItem.Value1.XAdvance)
elif valueFormat == 0: # RTL pair with value <0 0 0 0>
kernValue = "<0 0 0 0>"
elif valueFormat == 4: # LTR kerning
kernValue = pairValueRecordItem.Value1.XAdvance
else:
print(
"\tValueFormat1 = %d" % valueFormat,
file=sys.stdout)
continue # skip the rest
self.kerningPairs[(firstGlyphsList[ps_index], secondGlyph)] = kernValue
self.singlePairs[(firstGlyphsList[ps_index], secondGlyph)] = kernValue
def getClassPairs(self):
for loop, pairPos in enumerate(self.pairPosList):
if pairPos.Format == 2:
leftClasses = {}
rightClasses = {}
# Find left class with the Class1Record index="0".
# This first class is mixed into the "Coverage" table
# (e.g. all left glyphs) and has no class="X" property
# that is why we have to find the glyphs in that way.
lg0 = myLeftClass()
# list of all glyphs kerned to the left of a pair:
allLeftGlyphs = pairPos.Coverage.glyphs
# list of all glyphs contained in left-sided kerning classes:
# allLeftClassGlyphs = pairPos.ClassDef1.classDefs.keys()
singleGlyphs = []
classGlyphs = []
for gName, classID in pairPos.ClassDef1.classDefs.items():
if classID == 0:
singleGlyphs.append(gName)
else:
classGlyphs.append(gName)
# coverage glyphs minus glyphs in real class (without class 0)
lg0.glyphs = list(set(allLeftGlyphs) - set(classGlyphs))
lg0.glyphs.sort()
leftClasses[lg0.class1Record] = lg0
className = "class_%s_%s" % (loop, lg0.class1Record)
self.allLeftClasses[className] = lg0.glyphs
# Find all the remaining left classes:
for leftGlyph in pairPos.ClassDef1.classDefs:
class1Record = pairPos.ClassDef1.classDefs[leftGlyph]
if class1Record != 0: # this was the crucial line.
lg = myLeftClass()
lg.class1Record = class1Record
leftClasses.setdefault(
class1Record, lg).glyphs.append(leftGlyph)
self.allLeftClasses.setdefault(
"class_%s_%s" % (loop, lg.class1Record), lg.glyphs)
# Same for the right classes:
for rightGlyph in pairPos.ClassDef2.classDefs:
class2Record = pairPos.ClassDef2.classDefs[rightGlyph]
rg = myRightClass()
rg.class2Record = class2Record
rightClasses.setdefault(
class2Record, rg).glyphs.append(rightGlyph)
self.allRightClasses.setdefault(
"class_%s_%s" % (loop, rg.class2Record), rg.glyphs)
for record_l in leftClasses:
for record_r in rightClasses:
if pairPos.Class1Record[record_l].Class2Record[record_r]:
valueFormat = pairPos.ValueFormat1
if valueFormat in [4, 5]:
kernValue = pairPos.Class1Record[record_l].Class2Record[record_r].Value1.XAdvance
elif valueFormat == 0:
# valueFormat zero is caused by a value of <0 0 0 0> on a class-class pair; skip these
continue
else:
print(
"\tValueFormat1 = %d" % valueFormat,
file=sys.stdout)
continue # skip the rest
if kernValue != 0:
leftClassName = 'class_%s_%s' % (
loop, leftClasses[record_l].class1Record)
rightClassName = 'class_%s_%s' % (
loop, rightClasses[record_r].class2Record)
self.classPairs[(leftClassName, rightClassName)] = kernValue
for l in leftClasses[record_l].glyphs:
for r in rightClasses[record_r].glyphs:
if (l, r) in self.kerningPairs:
# if the kerning pair has already been assigned in pair-to-pair kerning
continue
else:
if valueFormat == 5: # RTL kerning
kernValue = "<%d 0 %d 0>" % (pairPos.Class1Record[record_l].Class2Record[record_r].Value1.XPlacement, pairPos.Class1Record[record_l].Class2Record[record_r].Value1.XAdvance)
self.kerningPairs[(l, r)] = kernValue
else:
print('ERROR', file=sys.stderr)
if __name__ == "__main__":
if len(sys.argv) == 2:
assumedFontPath = sys.argv[1]
if(
os.path.exists(assumedFontPath) and
os.path.splitext(assumedFontPath)[1].lower() in ['.otf', '.ttf']
):
fontPath = sys.argv[1]
f = OTFKernReader(fontPath)
finalList = []
for pair, value in f.kerningPairs.items():
finalList.append('/%s /%s %s' % (pair[0], pair[1], value))
finalList.sort()
output = '\n'.join(finalList)
print(output, file=sys.stdout)
print('\nTotal number of kerning pairs:', file=sys.stdout)
print(len(f.kerningPairs), file=sys.stdout)
# for i in sorted(f.allLeftClasses):
# print(i, f.allLeftClasses[i], file=sys.stdout)
else:
print('That is not a valid font.', file=sys.stderr)
else:
print('Please provide a font.', file=sys.stderr)
|
|
import toolz
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from . import base
from .. import transforms
fX = theano.config.floatX
class RemoveNodesWithClass(base.NetworkHandlerImpl):
"""
handler that removes nodes of the given class from the network
"""
def __init__(self, cls):
self.cls = cls
def transform_network(self, network):
return transforms.remove_nodes_with_class(network, self.cls)
remove_nodes_with_class = RemoveNodesWithClass
class WithHyperparameters(base.NetworkHandlerImpl):
"""
handler that adds hyperparameters to the network
"""
def __init__(self, name, **kwargs):
self.name = name
self.hyperparameters = kwargs
def transform_network(self, network):
return transforms.add_hyperparameters(network,
self.name,
self.hyperparameters)
with_hyperparameters = WithHyperparameters
class OverrideHyperparameters(base.NetworkHandlerImpl):
"""
handler that adds override hyperparameters to the network
"""
def __init__(self, **hyperparameters):
self.hyperparameters = hyperparameters
def transform_network(self, network):
# FIXME make this a transform
def update_fn(override_hyperparameters):
return toolz.merge(override_hyperparameters,
self.hyperparameters)
kwargs = toolz.update_in(transforms.fns.network_to_kwargs(network),
["override_hyperparameters"],
update_fn)
return treeano.Network(**kwargs)
override_hyperparameters = OverrideHyperparameters
class UpdateHyperparameters(base.NetworkHandlerImpl):
"""
handler that replaces the hyperparameters of a node
"""
def __init__(self, node_name, **hyperparameters):
self.node_name = node_name
self.hyperparameters = hyperparameters
def transform_network(self, network):
return transforms.update_hyperparameters(network,
self.node_name,
self.hyperparameters)
update_hyperparameters = UpdateHyperparameters
class ScheduleHyperparameter(base.NetworkHandlerImpl):
def __init__(self,
schedule,
hyperparameter=None,
node_name=None,
target_node_name=None,
input_key=None,
shape=(),
dtype=fX):
"""
WARNING: saves a copy of the previous output
schedule:
a function that takes in the current input dictionary and the previous
output dictionary (or None for the initial value) and returns a new
value for the hyperparameter
hyperparameter:
name of the hyperparameter to provide
node_name:
name of the SharedHyperparameterNode to create
(default: generates one)
target_node_name:
name of the node to provide the hyperparameter to
(default: root node)
input_key:
what key to put in the input dict
(default: generates one)
"""
self.hyperparameter = hyperparameter
self.schedule = schedule
if node_name is None:
assert hyperparameter is not None
node_name = "scheduled:%s" % hyperparameter
self.node_name = node_name
self.target_node_name = target_node_name
if input_key is None:
input_key = node_name
self.input_key = input_key
self.shape = shape
self.dtype = dtype
self.previous_result_ = None
def transform_network(self, network):
# don't add node if it already exists
if self.node_name in network:
return network
assert self.hyperparameter is not None
if self.target_node_name is None:
target_node_name = network.root_node.name
else:
target_node_name = self.target_node_name
return transforms.add_parent(
network=network,
name=target_node_name,
parent_constructor=tn.SharedHyperparameterNode,
parent_name=self.node_name,
parent_kwargs=dict(
hyperparameter=self.hyperparameter,
dtype=self.dtype,
shape=self.shape,
),
)
def transform_compile_function_kwargs(self, state, **kwargs):
assert self.input_key not in kwargs["inputs"]
kwargs["inputs"][self.input_key] = (self.node_name, "hyperparameter")
return kwargs
def call(self, fn, in_dict, *args, **kwargs):
assert self.input_key not in in_dict
hyperparameter_value = self.schedule(in_dict, self.previous_result_)
# make a copy of the dict, since we are mutating it
in_dict = dict(in_dict)
in_dict[self.input_key] = np.array(hyperparameter_value,
dtype=self.dtype)
res = fn(in_dict, *args, **kwargs)
self.previous_result_ = res
return res
schedule_hyperparameter = ScheduleHyperparameter
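# Illustrative sketch (hypothetical names, not part of treeano): a schedule is
# any callable taking (current_input_dict, previous_output_or_None) and
# returning the new hyperparameter value, e.g.
#
#     def halve_every_100_epochs(in_dict, previous_output):
#         return 0.01 * 0.5 ** (in_dict["epoch"] // 100)
#
#     handler = ScheduleHyperparameter(halve_every_100_epochs,
#                                      hyperparameter="learning_rate")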
class UseScheduledHyperparameter(base.NetworkHandlerImpl):
"""
allows a network to use the scheduled hyperparameter of a different
network
use case:
- having a validation network use the same parameter as a training
network
"""
def __init__(self, schedule_hyperparameter_handler):
assert isinstance(schedule_hyperparameter_handler,
ScheduleHyperparameter)
self.shh = schedule_hyperparameter_handler
def transform_network(self, network):
return self.shh.transform_network(network)
def transform_compile_function_kwargs(self, state, **kwargs):
return self.shh.transform_compile_function_kwargs(state, **kwargs)
use_scheduled_hyperparameter = UseScheduledHyperparameter
|
|
# -*- coding: utf-8 -*-
"""
> Overview
This program contains a sample implementation for loading a map produced
by Tiled in pyglet. The script can be run on its own to demonstrate its
capabilities, or the script can be imported to use its functionality. Users
will hopefully use the ResourceLoaderPyglet already provided in this module.
Tiled may be found at http://mapeditor.org/
> Demo Controls
Holding the arrow keys scrolls the map.
Holding the left shift key makes you scroll faster.
Pressing the Esc key closes the program.
> Demo Features
The map is fully viewable by scrolling.
You can scroll outside of the bounds of the map.
All visible layers are loaded and displayed.
Transparency is supported. (Nothing needed to be done for this.)
Minimal OpenGL used. (Less of a learning curve.)
"""
# Versioning scheme based on: http://en.wikipedia.org/wiki/Versioning#Designating_development_stage
#
# +-- api change, probably incompatible with older versions
# | +-- enhancements but no api change
# | |
# major.minor[.build[.revision]]
# |
# +-|* 0 for alpha (status)
# |* 1 for beta (status)
# |* 2 for release candidate
# |* 3 for (public) release
#
# For instance:
# * 1.2.0.1 instead of 1.2-a
# * 1.2.1.2 instead of 1.2-b2 (beta with some bug fixes)
# * 1.2.2.3 instead of 1.2-rc (release candidate)
# * 1.2.3.0 instead of 1.2-r (commercial distribution)
# * 1.2.3.5 instead of 1.2-r5 (commercial distribution with many bug fixes)
__revision__ = "$Rev$"
__version__ = "3.0.0." + __revision__[6:-2]
__author__ = 'DR0ID @ 2009-2011'
# -----------------------------------------------------------------------------
import sys
from xml.dom import minidom, Node
import io
import os.path
import pyglet
import copy
from . import tmxreader
# -----------------------------------------------------------------------------
# [20:31] bjorn: Of course, for fastest rendering, you would combine the used
# tiles into a single texture and set up arrays of vertex and texture coordinates.
# .. so that the video card can dump the map to the screen without having to
# analyze the tile data again and again.
class ResourceLoaderPyglet(tmxreader.AbstractResourceLoader):
"""Loads all tile images and lays them out on a grid.
Unlike the AbstractResourceLoader this class derives from, no overridden
methods use a colorkey parameter. A colorkey is only useful for pygame.
This loader adds its own pyglet-specific parameter to deal with
pyglet.image.load's capability to work with file-like objects.
"""
def load(self, tile_map):
tmxreader.AbstractResourceLoader.load(self, tile_map)
# ISSUE 17: flipped tiles
for layer in self.world_map.layers:
if layer.is_object_group: continue
for gid in layer.decoded_content:
if gid not in self.indexed_tiles:
if gid & self.FLIP_X or gid & self.FLIP_Y:
image_gid = gid & ~(self.FLIP_X | self.FLIP_Y)
offx, offy, img = self.indexed_tiles[image_gid]
# TODO: how to flip it? this does mix textures and image classes
img = copy.deepcopy(img)
tex = img.get_texture()
tex.anchor_x = tex.width // 2
tex.anchor_y = tex.height // 2
tex2 = tex.get_transform(bool(gid & self.FLIP_X), bool(gid & self.FLIP_Y))
# img2 = pyglet.image.ImageDataRegion(img.x, img.y, tex2.width, tex2.height, tex2.image_data))
tex.anchor_x = 0
tex.anchor_y = 0
self.indexed_tiles[gid] = (offx, offy, tex2)
def _load_image(self, filename, fileobj=None):
"""Load a single image.
Images are loaded only once; subsequent loads are served from a cache.
:Parameters:
filename : string
Path to the file to be loaded.
fileobj : file
A file-like object which pyglet can decode.
:rtype: A subclass of AbstractImage.
"""
img = self._img_cache.get(filename, None)
if img is None:
if fileobj:
img = pyglet.image.load(filename, fileobj,
pyglet.image.codecs.get_decoders("*.png")[0])
else:
img = pyglet.image.load(filename)
self._img_cache[filename] = img
return img
def _load_image_part(self, filename, x, y, w, h):
"""Load a section of an image and returns its ImageDataRegion."""
return self._load_image(filename).get_region(x, y, w, h)
def _load_image_parts(self, filename, margin, spacing, tile_width, tile_height, colorkey=None):
"""Load different tile images from one source image.
:Parameters:
filename : string
Path to image to be loaded.
margin : int
The margin around the image.
spacing : int
The space between the tile images.
tile_width : int
The width of a single tile.
tile_height : int
The height of a single tile.
colorkey : ???
Unused. (Intended for pygame.)
:rtype: A list of images.
"""
source_img = self._load_image(filename)
# ISSUE 16 fixed wrong sized tilesets
height = (source_img.height // tile_height) * tile_height
width = (source_img.width // tile_width) * tile_width
images = []
# Reverse the map column reading to compensate for pyglet's y-origin.
for y in range(height - tile_height, margin - tile_height, -tile_height - spacing):
for x in range(margin, width, tile_width + spacing):
img_part = self._load_image_part(filename, x, y - spacing, tile_width, tile_height)
images.append(img_part)
return images
def _load_image_file_like(self, file_like_obj):
"""Loads a file-like object and returns its subclassed AbstractImage."""
# TODO: Ask myself why this extra indirection is necessary.
return self._load_image(file_like_obj, file_like_obj)
# -----------------------------------------------------------------------------
def demo_pyglet(file_name):
"""Demonstrates loading, rendering, and traversing a Tiled map in pyglet.
TODO:
Maybe use this to put topleft as origin:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, (double)mTarget->w, (double)mTarget->h, 0.0, -1.0, 1.0);
"""
import pyglet
from pyglet.gl import glTranslatef, glLoadIdentity
world_map = tmxreader.TileMapParser().parse_decode(file_name)
# delta is the x/y position of the map view.
# delta is a list so that it can be accessed from the on_draw method of
# window and the update function. Note that the position is in integers to
# match Pyglet Sprites. Using floating-point numbers causes graphical
# problems. See http://groups.google.com/group/pyglet-users/browse_thread/thread/52f9ae1ef7b0c8fa?pli=1
delta = [200, -world_map.pixel_height+150]
frames_per_sec = 1.0 / 30.0
window = pyglet.window.Window()
@window.event
def on_draw():
window.clear()
# Reset the "eye" back to the default location.
glLoadIdentity()
# Move the "eye" to the current location on the map.
glTranslatef(delta[0], delta[1], 0.0)
# TODO: [21:03] thorbjorn: DR0ID_: You can generally determine the range of tiles that are visible before your drawing loop, which is much faster than looping over all tiles and checking whether it is visible for each of them.
# [21:06] DR0ID_: probably would have to rewrite the pyglet demo to use a similar render loop as you mentioned
# [21:06] thorbjorn: Yeah.
# [21:06] DR0ID_: I'll keep your suggestion in mind, thanks
# [21:06] thorbjorn: I haven't written a specific OpenGL renderer yet, so not sure what's the best approach for a tile map.
# [21:07] thorbjorn: Best to create a single texture with all your tiles, bind it, set up your vertex arrays and fill it with the coordinates of the tiles currently on the screen, and then let OpenGL draw the bunch.
# [21:08] DR0ID_: for each layer?
# [21:08] DR0ID_: yeah, probably a good approach
# [21:09] thorbjorn: Ideally for all layers at the same time, if you don't have to draw anything in between.
# [21:09] DR0ID_: well, the NPC and other dynamic things need to be drawn in between, right?
# [21:09] thorbjorn: Right, so maybe once for the bottom layers, then your complicated stuff, and then another time for the layers on top.
batch.draw()
keys = pyglet.window.key.KeyStateHandler()
window.push_handlers(keys)
resources = ResourceLoaderPyglet()
resources.load(world_map)
def update(dt):
# The speed is 3 by default.
# When left Shift is held, the speed increases.
# The speed interpolates based on time passed, so the demo navigates
# at a reasonable pace even on huge maps.
speed = (3 + keys[pyglet.window.key.LSHIFT] * 6) * \
int(dt / frames_per_sec)
if keys[pyglet.window.key.LEFT]:
delta[0] += speed
if keys[pyglet.window.key.RIGHT]:
delta[0] -= speed
if keys[pyglet.window.key.UP]:
delta[1] -= speed
if keys[pyglet.window.key.DOWN]:
delta[1] += speed
# Generate the graphics for every visible tile.
batch = pyglet.graphics.Batch()
sprites = []
for group_num, layer in enumerate(world_map.layers):
if not layer.visible:
continue
if layer.is_object_group:
# This is unimplemented in this minimal-case example code.
# Should you as a user of tmxreader need this layer,
# I hope to have a separate demo using objects as well.
continue
group = pyglet.graphics.OrderedGroup(group_num)
for ytile in range(layer.height):
for xtile in range(layer.width):
image_id = layer.content2D[xtile][ytile]
if image_id:
image_file = resources.indexed_tiles[image_id][2]
# The loader needed to load the images upside-down to match
# the tiles to their correct images. This reversal must be
# done again to render the rows in the correct order.
sprites.append(pyglet.sprite.Sprite(image_file,
world_map.tilewidth * xtile,
world_map.tileheight * (layer.height - ytile),
batch=batch, group=group))
pyglet.clock.schedule_interval(update, frames_per_sec)
pyglet.app.run()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# import cProfile
# cProfile.run('main()', "stats.profile")
# import pstats
# p = pstats.Stats("stats.profile")
# p.strip_dirs()
# p.sort_stats('time')
# p.print_stats()
if len(sys.argv) == 2:
demo_pyglet(sys.argv[1])
else:
print(('Usage: python %s your_map.tmx' % os.path.basename(__file__)))
|
|
import logging
from discord.ext import commands
from discord.ext.commands import Context
from cogbot import checks
from cogbot.cog_bot import CogBot
from cogbot.extensions.groups.error import *
from cogbot.extensions.groups.group_directory import GroupDirectory
log = logging.getLogger(__name__)
class GroupsConfig:
DEFAULT_COOLDOWN_RATE = 5
DEFAULT_COOLDOWN_PER = 60
def __init__(self, **options):
self.cooldown_rate = options.pop('cooldown_rate', self.DEFAULT_COOLDOWN_RATE)
self.cooldown_per = options.pop('cooldown_per', self.DEFAULT_COOLDOWN_PER)
self.server_groups = options.pop('server_groups', {})
class Groups:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
options = bot.state.get_extension_state(ext)
self.config = GroupsConfig(**options)
# TODO fix hack
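# The @commands.cooldown decorator on cmd_groups is applied with the class
# defaults at definition time, so the configured rate/per values are patched
# into the command's private cooldown bucket here.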
self.cmd_groups._buckets._cooldown.rate = self.config.cooldown_rate
self.cmd_groups._buckets._cooldown.per = self.config.cooldown_per
self._group_directory = GroupDirectory()
async def on_ready(self):
# Load initial groups after the bot has made associations with servers.
# TODO sometimes runs multiple times, figure out a better way
for server_id, groups in self.config.server_groups.items():
server = self.bot.get_server(server_id)
if server_id not in self._group_directory._role_map:
for group in groups:
self._group_directory.add_group(server, group)
async def add_groups(self, ctx: Context, *groups):
server, author = ctx.message.server, ctx.message.author
# TODO filter valid groups beforehand
if groups:
for group in groups:
try:
self._group_directory.add_group(server, group)
log.info(f'[{server}/{author}] Added group "{group}"')
await self.bot.react_success(ctx)
except NoSuchRoleNameError:
log.warning(f'[{server}/{author}] Tried to add group "{group}" without a role')
await self.bot.react_failure(ctx)
except GroupAlreadyExistsError:
log.warning(f'[{server}/{author}] Tried to add pre-existing group "{group}"')
await self.bot.react_failure(ctx)
else:
await self.bot.react_question(ctx)
async def remove_groups(self, ctx: Context, *groups):
server, author = ctx.message.server, ctx.message.author
# TODO filter valid groups beforehand
if groups:
for group in groups:
try:
self._group_directory.remove_group(server, group)
log.info(f'[{server}/{author}] Removed group "{group}"')
await self.bot.react_success(ctx)
except NoSuchGroupError:
log.warning(f'[{server}/{author}] Tried to remove non-existent group "{group}"')
await self.bot.react_failure(ctx)
else:
await self.bot.react_question(ctx)
async def join_groups(self, ctx: Context, *groups):
server, author = ctx.message.server, ctx.message.author
# TODO filter valid groups beforehand
if groups:
for group in groups:
try:
role = self._group_directory.get_role(server, group)
if role in author.roles:
log.info(f'[{server}/{author}] Tried to join pre-joined group "{group}"')
await self.bot.react_neutral(ctx)
else:
await self.bot.add_roles(author, role)
log.info(f'[{server}/{author}] Joined group "{group}"')
await self.bot.react_success(ctx)
except NoSuchGroupError:
log.warning(f'[{server}/{author}] Tried to join non-existent group "{group}"')
await self.bot.react_failure(ctx)
else:
await self.bot.react_question(ctx)
async def leave_groups(self, ctx: Context, *groups):
server, author = ctx.message.server, ctx.message.author
# TODO filter valid groups beforehand
if groups:
for group in groups:
try:
role = self._group_directory.get_role(server, group)
if role in author.roles:
await self.bot.remove_roles(author, role)
log.info(f'[{server}/{author}] Left group "{group}"')
await self.bot.react_success(ctx)
else:
log.info(f'[{server}/{author}] Tried to leave un-joined group "{group}"')
await self.bot.react_neutral(ctx)
except NoSuchGroupError:
log.warning(f'[{server}/{author}] Tried to leave non-existent group "{group}"')
await self.bot.react_failure(ctx)
else:
await self.bot.react_question(ctx)
async def leave_all_groups(self, ctx: Context):
server, author = ctx.message.server, ctx.message.author
group_roles = [role for role in author.roles if self._group_directory.is_role(server, str(role))]
await self.bot.remove_roles(author, *group_roles)
log.info(f'[{server}/{author}] Left all {len(group_roles)} groups')
await self.bot.react_success(ctx)
async def list_groups(self, ctx: Context):
groups = list(self._group_directory.groups(ctx.message.server))
if groups:
groups_str = ', '.join([('**' + group + '**') for group in groups])
reply = f'Available groups: {groups_str}'
else:
reply = f'No groups available.'
await self.bot.send_message(ctx.message.channel, reply)
async def list_group_members(self, ctx: Context, group: str):
try:
members = self._group_directory.get_members(ctx.message.server, group)
members_str = ', '.join([member.name for member in members])
log.info('-> group members: ' + members_str)
if members:
reply = f'Group **{group}** has members: {members_str}'
else:
reply = f'Group **{group}** has no members.'
except NoSuchGroupError:
log.warning('-> group does not exist')
reply = f'Group **{group}** does not exist.'
await self.bot.send_message(ctx.message.channel, reply)
@commands.cooldown(GroupsConfig.DEFAULT_COOLDOWN_RATE, GroupsConfig.DEFAULT_COOLDOWN_PER, commands.BucketType.user)
@commands.group(pass_context=True, name='groups')
async def cmd_groups(self, ctx: Context):
if ctx.invoked_subcommand is None:
await self.list_groups(ctx)
@checks.is_moderator()
@cmd_groups.command(pass_context=True, name='add', hidden=True)
async def cmd_groups_add(self, ctx: Context, *groups):
await self.add_groups(ctx, *groups)
@checks.is_moderator()
@cmd_groups.command(pass_context=True, name='remove', hidden=True)
async def cmd_groups_remove(self, ctx: Context, *groups):
await self.remove_groups(ctx, *groups)
@checks.is_moderator()
@cmd_groups.command(pass_context=True, name='members', hidden=True)
async def cmd_groups_members(self, ctx: Context, group: str):
await self.list_group_members(ctx, group)
@cmd_groups.command(pass_context=True, name='list')
async def cmd_groups_list(self, ctx: Context):
await self.list_groups(ctx)
@cmd_groups.command(pass_context=True, name='join')
async def cmd_groups_join(self, ctx: Context, *groups):
await self.join_groups(ctx, *groups)
@cmd_groups.command(pass_context=True, name='leave')
async def cmd_groups_leave(self, ctx: Context, *groups):
await self.leave_groups(ctx, *groups)
@cmd_groups.command(pass_context=True, name='leaveall')
async def cmd_groups_leaveall(self, ctx: Context):
await self.leave_all_groups(ctx)
|
|
import random
from collections.abc import Iterable
import numpy as np
from dipy.tracking.localtrack import local_tracker, pft_tracker
from dipy.tracking.stopping_criterion import (AnatomicalStoppingCriterion,
StreamlineStatus)
from dipy.tracking import utils
class LocalTracking(object):
@staticmethod
def _get_voxel_size(affine):
"""Computes the voxel sizes of an image from the affine.
Checks that the affine does not have any shear because local_tracker
assumes that the data is sampled on a regular grid.
"""
lin = affine[:3, :3]
dotlin = np.dot(lin.T, lin)
# Check that the affine is well behaved
if not np.allclose(np.triu(dotlin, 1), 0., atol=1e-5):
msg = ("The affine provided seems to contain shearing, data must "
"be acquired or interpolated on a regular grid to be used "
"with `LocalTracking`.")
raise ValueError(msg)
return np.sqrt(dotlin.diagonal())
def __init__(self, direction_getter, stopping_criterion, seeds, affine,
step_size, max_cross=None, maxlen=500, fixedstep=True,
return_all=True, random_seed=None, save_seeds=False):
"""Creates streamlines by using local fiber-tracking.
Parameters
----------
direction_getter : instance of DirectionGetter
Used to get directions for fiber tracking.
stopping_criterion : instance of StoppingCriterion
Identifies endpoints and invalid points to inform tracking.
seeds : array (N, 3)
Points to seed the tracking. Seed points should be given in point
space of the track (see ``affine``).
affine : array (4, 4)
Coordinate space for the streamline point with respect to voxel
indices of input data. This affine can contain scaling, rotational,
and translational components but should not contain any shearing.
An identity matrix can be used to generate streamlines in "voxel
coordinates" as long as isotropic voxels were used to acquire the
data.
step_size : float
Step size used for tracking.
max_cross : int or None
The maximum number of directions to track from each seed in crossing
voxels. By default all initial directions are tracked.
maxlen : int
Maximum number of steps to track from seed. Used to prevent
infinite loops.
fixedstep : bool
If true, a fixed stepsize is used, otherwise a variable step size
is used.
return_all : bool
If true, return all generated streamlines, otherwise only
streamlines reaching end points or exiting the image.
random_seed : int
The seed for the random number generator (numpy.random.seed and
random.seed).
save_seeds : bool
If True, return seeds alongside streamlines
"""
self.direction_getter = direction_getter
self.stopping_criterion = stopping_criterion
self.seeds = seeds
if affine.shape != (4, 4):
raise ValueError("affine should be a (4, 4) array.")
if step_size <= 0:
raise ValueError("step_size must be greater than 0.")
if maxlen < 1:
raise ValueError("maxlen must be greater than 0.")
if not isinstance(seeds, Iterable):
raise ValueError("seeds should be (N,3) array.")
self.affine = affine
self._voxel_size = np.ascontiguousarray(self._get_voxel_size(affine),
dtype=float)
self.step_size = step_size
self.fixed_stepsize = fixedstep
self.max_cross = max_cross
self.max_length = maxlen
self.return_all = return_all
self.random_seed = random_seed
self.save_seeds = save_seeds
def _tracker(self, seed, first_step, streamline):
return local_tracker(self.direction_getter,
self.stopping_criterion,
seed,
first_step,
self._voxel_size,
streamline,
self.step_size,
self.fixed_stepsize)
def __iter__(self):
# Make tracks, move them to point space and return
track = self._generate_streamlines()
return utils.transform_tracking_output(track, self.affine,
save_seeds=self.save_seeds)
def _generate_streamlines(self):
"""A streamline generator"""
# Get inverse transform (lin/offset) for seeds
inv_A = np.linalg.inv(self.affine)
lin = inv_A[:3, :3]
offset = inv_A[:3, 3]
F = np.empty((self.max_length + 1, 3), dtype=float)
B = F.copy()
for s in self.seeds:
s = np.dot(lin, s) + offset
# Set the random seed in numpy and random
if self.random_seed is not None:
s_random_seed = hash(np.abs((np.sum(s)) + self.random_seed)) \
% (2**32 - 1)
random.seed(s_random_seed)
np.random.seed(s_random_seed)
directions = self.direction_getter.initial_direction(s)
if directions.size == 0 and self.return_all:
# only the seed position
if self.save_seeds:
yield [s], s
else:
yield [s]
directions = directions[:self.max_cross]
for first_step in directions:
stepsF, stream_status = self._tracker(s, first_step, F)
if not (self.return_all or
stream_status == StreamlineStatus.ENDPOINT or
stream_status == StreamlineStatus.OUTSIDEIMAGE):
continue
first_step = -first_step
stepsB, stream_status = self._tracker(s, first_step, B)
if not (self.return_all or
stream_status == StreamlineStatus.ENDPOINT or
stream_status == StreamlineStatus.OUTSIDEIMAGE):
continue
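# Stitch the two half-tracks together: B holds the backward half and F
# the forward half. B[0] is the seed itself, so it is skipped to avoid
# duplicating the seed point in the final streamline.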
if stepsB == 1:
streamline = F[:stepsF].copy()
else:
parts = (B[stepsB - 1:0:-1], F[:stepsF])
streamline = np.concatenate(parts, axis=0)
# move to the next streamline if only the seed position
# and not return all
if len(streamline) > 1 or self.return_all:
if self.save_seeds:
yield streamline, s
else:
yield streamline
class ParticleFilteringTracking(LocalTracking):
def __init__(self, direction_getter, stopping_criterion, seeds, affine,
step_size, max_cross=None, maxlen=500,
pft_back_tracking_dist=2, pft_front_tracking_dist=1,
pft_max_trial=20, particle_count=15, return_all=True,
random_seed=None, save_seeds=False):
r"""A streamline generator using the particle filtering tractography
method [1]_.
Parameters
----------
direction_getter : instance of ProbabilisticDirectionGetter
Used to get directions for fiber tracking.
stopping_criterion : instance of AnatomicalStoppingCriterion
Identifies endpoints and invalid points to inform tracking.
seeds : array (N, 3)
Points to seed the tracking. Seed points should be given in point
space of the track (see ``affine``).
affine : array (4, 4)
Coordinate space for the streamline point with respect to voxel
indices of input data. This affine can contain scaling, rotational,
and translational components but should not contain any shearing.
An identity matrix can be used to generate streamlines in "voxel
coordinates" as long as isotropic voxels were used to acquire the
data.
step_size : float
Step size used for tracking.
max_cross : int or None
The maximum number of directions to track from each seed in crossing
voxels. By default all initial directions are tracked.
maxlen : int
Maximum number of steps to track from seed. Used to prevent
infinite loops.
pft_back_tracking_dist : float
Distance in mm to back track before starting the particle filtering
tractography. The total particle filtering tractography distance is
equal to back_tracking_dist + front_tracking_dist.
By default this is set to 2 mm.
pft_front_tracking_dist : float
Distance in mm to run the particle filtering tractography after the
back track distance. The total particle filtering tractography
distance is equal to back_tracking_dist + front_tracking_dist. By
default this is set to 1 mm.
pft_max_trial : int
Maximum number of trials for the particle filtering tractography
(Prevents infinite loops).
particle_count : int
Number of particles to use in the particle filter.
return_all : bool
If true, return all generated streamlines, otherwise only
streamlines reaching end points or exiting the image.
random_seed : int
The seed for the random number generator (numpy.random.seed and
random.seed).
save_seeds : bool
If True, return seeds alongside streamlines
References
----------
.. [1] Girard, G., Whittingstall, K., Deriche, R., & Descoteaux, M.
Towards quantitative connectivity analysis: reducing
tractography biases. NeuroImage, 98, 266-278, 2014.
"""
if not isinstance(stopping_criterion, AnatomicalStoppingCriterion):
raise ValueError("expecting AnatomicalStoppingCriterion")
self.pft_max_nbr_back_steps = int(np.ceil(pft_back_tracking_dist
/ step_size))
self.pft_max_nbr_front_steps = int(np.ceil(pft_front_tracking_dist
/ step_size))
pft_max_steps = (self.pft_max_nbr_back_steps +
self.pft_max_nbr_front_steps)
if (self.pft_max_nbr_front_steps < 0
or self.pft_max_nbr_back_steps < 0
or pft_max_steps < 1):
raise ValueError("The number of PFT steps must be greater than 0.")
if particle_count <= 0:
raise ValueError("The particle count must be greater than 0.")
self.directions = np.empty((maxlen + 1, 3), dtype=float)
self.pft_max_trial = pft_max_trial
self.particle_count = particle_count
self.particle_paths = np.empty((2, self.particle_count,
pft_max_steps + 1, 3),
dtype=float)
self.particle_weights = np.empty(self.particle_count, dtype=float)
self.particle_dirs = np.empty((2, self.particle_count,
pft_max_steps + 1, 3), dtype=float)
self.particle_steps = np.empty((2, self.particle_count), dtype=int)
self.particle_stream_statuses = np.empty((2, self.particle_count),
dtype=int)
super(ParticleFilteringTracking, self).__init__(direction_getter,
stopping_criterion,
seeds,
affine,
step_size,
max_cross,
maxlen,
True,
return_all,
random_seed,
save_seeds)
def _tracker(self, seed, first_step, streamline):
return pft_tracker(self.direction_getter,
self.stopping_criterion,
seed,
first_step,
self._voxel_size,
streamline,
self.directions,
self.step_size,
self.pft_max_nbr_back_steps,
self.pft_max_nbr_front_steps,
self.pft_max_trial,
self.particle_count,
self.particle_paths,
self.particle_dirs,
self.particle_weights,
self.particle_steps,
self.particle_stream_statuses)
|
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .ome_data import tags_data, projects_datasets, original_files, mirax_files, datasets_files
from .ome_data.original_files import DuplicatedEntryError, get_original_file_by_id, get_original_file
from .ome_data.mirax_files import InvalidMiraxFile, InvalidMiraxFolder
from . import settings
from .slides_manager import RenderingEngineFactory
from .dzi_adapter import DZIAdapterFactory
from .dzi_adapter.errors import InvalidColorPalette, InvalidAttribute
import logging
from distutils.util import strtobool
try:
import simplejson as json
except ImportError:
import json
from omeroweb.webclient.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseServerError, HttpResponseBadRequest
from django.shortcuts import render
logger = logging.getLogger(__name__)
def check_app(request):
return HttpResponse("ome_seadragon working!")
def check_repository(request):
if settings.IMGS_REPOSITORY:
return HttpResponse(settings.IMGS_REPOSITORY)
else:
return HttpResponse("No repository has been configured")
@login_required()
def start_connection(request, conn=None, **kwargs):
public_user_allowed = bool(strtobool(request.GET.get('allow_public_user', default='true')))
if not public_user_allowed:
if settings.OME_PUBLIC_USER is None:
return HttpResponseServerError('"omero.web.ome_seadragon.ome_public_user" property was not configured on the server')
if conn.getUser().getName() == settings.OME_PUBLIC_USER:
return HttpResponse('Unauthorized', status=401)
return HttpResponse(status=204)
@login_required()
def check_image_path(request, image_id, conn=None, **kwargs):
rendering_engine = RenderingEngineFactory().get_tiles_rendering_engine(image_id, conn)
return HttpResponse(rendering_engine._get_image_path())
def get_example_viewer(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_viewer.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
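# Illustrative sketch only (not part of the original module): the
# get_example_* views in this file each rebuild the same scheme://host
# prefix, which could be shared through a small helper like this
# hypothetical one.
def _viewer_base_url(request):
    return '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])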
def get_example_viewer_json(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_viewer_json.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
def get_example_sequence_viewer(request, dataset_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
return render(request, 'ome_seadragon/test/test_sequence_viewer.html',
{'dataset_id': dataset_id, 'host_name': base_url})
def get_example_double_viewer(request, image_a_id, image_b_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
# no MIRAX images support right now
return render(request, 'ome_seadragon/test/test_double_viewer.html',
{'image_a_id': image_a_id, 'image_b_id': image_b_id,
'host_name': base_url})
def get_example_annotations(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_annotations.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
def get_example_ome_rois(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
return render(request, 'ome_seadragon/test/test_ome_roi.html',
{'image_id': image_id, 'host_name': base_url})
def get_example_interactive_markers(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_markers.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
def get_example_interactive_polygons(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_polygons.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
def get_example_interactive_rulers(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_rulers.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
def get_example_interactive_freehand(request, image_id):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_freehand_drawing.html',
{'image_id': image_id, 'host_name': base_url, 'mirax': mirax})
def get_example_array_viewer(request, dataset_label):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
return render(request, 'ome_seadragon/test/test_array_viewer.html',
{'dataset_label': dataset_label, 'host_name': base_url})
def get_example_overlay_viewer(request, image_id, dataset_label):
base_url = '%s://%s' % (request.META['wsgi.url_scheme'], request.META['HTTP_HOST'])
mirax = bool(strtobool(request.GET.get('mirax_image', default='false')))
return render(request, 'ome_seadragon/test/test_overlay_viewer.html',
                  {'image_id': image_id, 'dataset_label': dataset_label, 'host_name': base_url,
'mirax': mirax})
@login_required()
def get_projects(request, conn=None, **kwargs):
try:
fetch_datasets = bool(strtobool(request.GET.get('datasets')))
except (ValueError, AttributeError):
fetch_datasets = False
projects = projects_datasets.get_projects(conn, fetch_datasets)
return HttpResponse(json.dumps(projects), content_type='application/json')
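# Illustrative sketch only (not part of the original module): the recurring
# try/except around strtobool for boolean query parameters could be collapsed
# into a hypothetical helper such as this.
def _get_bool_param(request, name, default=False):
    try:
        return bool(strtobool(request.GET.get(name)))
    except (ValueError, AttributeError):
        return default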
@login_required()
def get_project(request, project_id, conn=None, **kwargs):
try:
fetch_datasets = bool(strtobool(request.GET.get('datasets')))
except (ValueError, AttributeError):
fetch_datasets = False
try:
fetch_images = bool(strtobool(request.GET.get('images')))
except (ValueError, AttributeError):
fetch_images = False
try:
expand_series = bool(strtobool(request.GET.get('full_series')))
except (ValueError, AttributeError):
expand_series = False
project = projects_datasets.get_project(conn, project_id, fetch_datasets, fetch_images,
expand_series)
return HttpResponse(json.dumps(project), content_type='application/json')
@login_required()
def get_dataset(request, dataset_id, conn=None, **kwargs):
try:
fetch_images = bool(strtobool(request.GET.get('images')))
except (ValueError, AttributeError):
fetch_images = False
try:
expand_series = bool(strtobool(request.GET.get('full_series')))
except (ValueError, AttributeError):
expand_series = False
dataset = projects_datasets.get_dataset(conn, dataset_id, fetch_images,
expand_series)
return HttpResponse(json.dumps(dataset), content_type='application/json')
@login_required()
def get_image(request, image_id, conn=None, **kwargs):
try:
fetch_rois = bool(strtobool(request.GET.get('rois')))
except (ValueError, AttributeError):
fetch_rois = False
image = projects_datasets.get_image(conn, image_id, fetch_rois)
return HttpResponse(json.dumps(image), content_type='application/json')
@login_required()
def get_images_quick_list(request, conn=None, **kwargs):
try:
expand_series = bool(strtobool(request.GET.get('full_series')))
except (ValueError, AttributeError):
expand_series = False
images_list = projects_datasets.get_images_quick_list(conn, expand_series)
return HttpResponse(json.dumps(images_list), content_type='application/json')
@login_required()
def get_annotations(request, conn=None, **kwargs):
try:
fetch_images = bool(strtobool(request.GET.get('fetch_imgs')))
except (ValueError, AttributeError):
fetch_images = False
annotations = tags_data.get_annotations_list(conn, fetch_images)
return HttpResponse(json.dumps(annotations), content_type='application/json')
@login_required()
def get_tagset(request, tagset_id, conn=None, **kwargs):
try:
fetch_tags = bool(strtobool(request.GET.get('tags')))
except (ValueError, AttributeError):
fetch_tags = False
try:
fetch_images = bool(strtobool(request.GET.get('images')))
except (ValueError, AttributeError):
fetch_images = False
tagset = tags_data.get_tagset(conn, tagset_id, fetch_tags, fetch_images)
return HttpResponse(json.dumps(tagset), content_type='application/json')
@login_required()
def get_tag(request, tag_id, conn=None, **kwargs):
try:
fetch_images = bool(strtobool(request.GET.get('images')))
except (ValueError, AttributeError):
fetch_images = False
tag = tags_data.get_tag(conn, tag_id, fetch_images)
return HttpResponse(json.dumps(tag), content_type='application/json')
@login_required()
def find_annotations(request, conn=None, **kwargs):
search_pattern = request.GET.get('query')
try:
fetch_images = bool(strtobool(request.GET.get('fetch_imgs')))
except (ValueError, AttributeError):
fetch_images = False
logger.debug('"fetch_imgs" value %r', fetch_images)
annotations = tags_data.find_annotations(search_pattern, conn, fetch_images)
return HttpResponse(json.dumps(annotations), content_type='application/json')
@login_required()
def get_image_dzi(request, image_id, fetch_original_file=False, file_mimetype=None, conn=None, **kwargs):
try:
tile_size = int(request.GET.get('tile_size'))
except TypeError:
tile_size = None
try:
limit_bounds = bool(strtobool(request.GET.get('limit_bounds')))
except AttributeError:
limit_bounds = None
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
try:
dzi_metadata = rendering_engine.get_dzi_description(fetch_original_file, file_mimetype,
tile_size, limit_bounds)
except Exception as e:
rendering_engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
if rendering_engine:
dzi_metadata = rendering_engine.get_dzi_description(fetch_original_file, file_mimetype,
tile_size, limit_bounds)
else:
raise e
if dzi_metadata:
return HttpResponse(dzi_metadata, content_type='application/xml')
else:
return HttpResponseNotFound('No image with ID %s' % image_id)
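# Illustrative sketch only (not part of the original module): the
# primary/secondary rendering-engine fallback used by get_image_dzi above,
# and repeated by several views below, could be expressed as a generic
# hypothetical helper that retries a callable on the secondary engine.
def _with_engine_fallback(rf, image_id, conn, call):
    engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
    try:
        return call(engine)
    except Exception:
        engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
        if engine:
            return call(engine)
        raise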
@login_required()
def get_image_json(request, image_id, fetch_original_file=False, file_mimetype=None, conn=None, **kwargs):
try:
tile_size = int(request.GET.get('tile_size'))
except TypeError:
tile_size = None
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
resource_path = request.build_absolute_uri('%s_files/' % image_id)
try:
json_metadata = rendering_engine.get_json_description(resource_path, fetch_original_file,
file_mimetype, tile_size)
except Exception as e:
rendering_engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
if rendering_engine:
json_metadata = rendering_engine.get_json_description(resource_path, fetch_original_file,
file_mimetype, tile_size)
else:
raise e
if json_metadata:
return HttpResponse(json.dumps(json_metadata), content_type='application/json')
else:
return HttpResponseNotFound('No image with ID %s' % image_id)
@login_required()
def get_image_metadata(request, image_id, fetch_original_file=False, file_mimetype=None, conn=None, **kwargs):
try:
tile_size = int(request.GET.get('tile_size'))
except TypeError:
tile_size = None
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
resource_path = request.build_absolute_uri('%s_files/' % image_id)
try:
img_metadata = rendering_engine.get_image_description(resource_path, fetch_original_file,
file_mimetype, tile_size)
except Exception as e:
rendering_engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
if rendering_engine:
img_metadata = rendering_engine.get_image_description(resource_path, fetch_original_file,
file_mimetype, tile_size)
else:
raise e
if img_metadata:
return HttpResponse(json.dumps(img_metadata), content_type='application/json')
else:
return HttpResponseNotFound('No image with ID %s' % image_id)
@login_required()
def get_image_thumbnail(request, image_id, fetch_original_file=False,
file_mimetype=None, conn=None, **kwargs):
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_thumbnails_rendering_engine(image_id, conn)
try:
thumbnail, image_format = rendering_engine.get_thumbnail(int(request.GET.get('size')),
fetch_original_file, file_mimetype)
except Exception as e:
rendering_engine = rf.get_secondary_thumbnails_rendering_engine(image_id, conn)
if rendering_engine:
thumbnail, image_format = rendering_engine.get_thumbnail(int(request.GET.get('size')),
fetch_original_file, file_mimetype)
else:
raise e
if thumbnail:
response = HttpResponse(content_type="image/%s" % image_format)
thumbnail.save(response, image_format)
return response
else:
return HttpResponseServerError('Unable to load thumbnail')
@login_required()
def get_tile(request, image_id, level, column, row, tile_format,
fetch_original_file=False, file_mimetype=None, conn=None, **kwargs):
try:
tile_size = int(request.GET.get('tile_size'))
except TypeError:
tile_size = None
try:
limit_bounds = bool(strtobool(request.GET.get('limit_bounds')))
except AttributeError:
limit_bounds = None
if tile_format != settings.DEEPZOOM_FORMAT:
return HttpResponseServerError("Format %s not supported by the server" % tile_format)
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
try:
tile, image_format = rendering_engine.get_tile(int(level), int(column), int(row), fetch_original_file,
file_mimetype, tile_size, limit_bounds)
except Exception as e:
logger.error(e)
rendering_engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
if rendering_engine:
tile, image_format = rendering_engine.get_tile(int(level), int(column), int(row), fetch_original_file,
file_mimetype, tile_size, limit_bounds)
else:
raise e
if tile:
response = HttpResponse(content_type='image/%s' % image_format)
tile.save(response, image_format)
return response
else:
return HttpResponseNotFound('No tile can be found')
@login_required()
def get_image_mpp(request, image_id, fetch_original_file=False, file_mimetype=None, conn=None, **kwargs):
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
try:
image_mpp = rendering_engine.get_openseadragon_config(fetch_original_file, file_mimetype)['mpp']
except Exception as e:
rendering_engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
if rendering_engine:
image_mpp = rendering_engine.get_openseadragon_config(fetch_original_file, file_mimetype)['mpp']
else:
raise e
return HttpResponse(json.dumps({'image_mpp': image_mpp}), content_type='application/json')
@login_required()
def get_slide_bounds(request, image_id, fetch_original_file=False, file_mimetype=None, conn=None, **kwargs):
rf = RenderingEngineFactory()
rendering_engine = rf.get_primary_tiles_rendering_engine(image_id, conn)
try:
slide_bounds = rendering_engine.get_slide_bounds(fetch_original_file, file_mimetype)
except Exception as e:
rendering_engine = rf.get_secondary_tiles_rendering_engine(image_id, conn)
if rendering_engine:
slide_bounds = rendering_engine.get_slide_bounds(fetch_original_file, file_mimetype)
else:
raise e
if slide_bounds:
return HttpResponse(json.dumps(slide_bounds), content_type='application/json')
else:
return HttpResponseNotFound('No image with ID %s' % image_id)
@login_required()
def register_original_file(request, conn=None, **kwargs):
error_on_duplicated = bool(strtobool(request.GET.get('error_on_duplicated', default='false')))
try:
fname = request.GET.get('name')
if not original_files.is_valid_filename(fname):
return HttpResponseBadRequest('Invalid file name received: %s' % fname)
fpath = request.GET.get('path')
fmtype = request.GET.get('mimetype')
if not all([fname, fpath, fmtype]):
return HttpResponseBadRequest('Mandatory field missing')
file_id, file_created = original_files.save_original_file(conn, fname, fpath, fmtype,
int(request.GET.get('size', default=-1)),
request.GET.get('sha1', default='UNKNOWN'),
error_on_duplicated)
return HttpResponse(json.dumps({'omero_id': file_id, 'file_created': file_created}),
content_type='application/json')
except DuplicatedEntryError as dee:
return HttpResponseServerError('%s' % dee)
@login_required()
def register_mirax_slide(request, conn=None, **kwargs):
sname = request.GET.get('slide_name')
error_on_duplicated = bool(strtobool(request.GET.get('error_on_duplicated', default='false')))
if not original_files.is_valid_filename(sname):
return HttpResponseServerError('Invalid slide name received: %s' % sname)
try:
mirax_paths = mirax_files.get_mirax_files_paths(sname)
try:
mirax_file_id, mirax_file_created = original_files.save_original_file(conn, sname, mirax_paths[0],
'mirax/index', -1, 'UNKNOWN',
error_on_duplicated)
try:
mirax_folder_id, mirax_folder_created = original_files.save_original_file(conn, sname, mirax_paths[1],
'mirax/datafolder',
-1, 'UNKNOWN',
error_on_duplicated)
return HttpResponse(
json.dumps({
'mirax_index_omero_id': mirax_file_id,
'mirax_index_created': mirax_file_created,
'mirax_folder_omero_id': mirax_folder_id,
'mirax_folder_created': mirax_folder_created
}),
content_type='application/json'
)
except DuplicatedEntryError as dee:
original_files.delete_original_files(conn, sname, 'mirax/index')
return HttpResponseServerError('{0}'.format(dee))
except DuplicatedEntryError as dee:
return HttpResponseServerError('{0}'.format(dee))
except InvalidMiraxFile as imf:
return HttpResponseServerError('{0}'.format(imf))
except InvalidMiraxFolder as imf:
return HttpResponseServerError('{0}'.format(imf))
except settings.ServerConfigError as sce:
return HttpResponseServerError('{0}'.format(sce))
@login_required()
def get_mirax_slide_details(request, slide_id, conn=None, **kwargs):
mirax_data_folder = get_original_file(conn, slide_id, 'mirax/datafolder')
mirax_index_file = get_original_file(conn, slide_id, 'mirax/index')
slide_details = {
'slide_label': slide_id,
'index_file': {
'omero_id': mirax_index_file.getId(),
'label': mirax_index_file.getName(),
'mimetype': mirax_index_file.getMimetype(),
'hash': mirax_index_file.getHash()
},
'data_folder': {
'omero_id': mirax_data_folder.getId(),
'label': mirax_data_folder.getName(),
'mimetype': mirax_data_folder.getMimetype(),
'hash': mirax_data_folder.getHash()
}
}
return HttpResponse(json.dumps(slide_details), content_type='application/json')
@login_required()
def list_array_datasets(request, conn=None, **kwargs):
datasets = datasets_files.get_datasets(conn)
return HttpResponse(json.dumps(datasets), content_type='application/json')
@login_required()
def register_array_dataset(request, conn=None, **kwargs):
dataset_label = request.GET.get('dataset_label')
error_on_duplicated = bool(strtobool(request.GET.get('error_on_duplicated', default='false')))
if not original_files.is_valid_filename(dataset_label):
return HttpResponseServerError('Invalid dataset name received: {0}'.format(dataset_label))
try:
dataset_path, is_dir = datasets_files.get_dataset_file_path(dataset_label)
if not is_dir:
            if not bool(strtobool(request.GET.get('extract_archive', default='true'))):
dataset_label, dataset_path = datasets_files.rename_archive(dataset_path)
else:
try:
keep_archive = bool(strtobool(request.GET.get('keep_archive', default='false')))
dataset_label, dataset_path = datasets_files.extract_archive(dataset_path,
keep_archive=keep_archive)
is_dir = True
except datasets_files.DatasetPathAlreadyExistError as dpe:
return HttpResponseServerError('{0}'.format(dpe))
try:
mtype = datasets_files.check_dataset(dataset_path, is_dir)
dataset_id, dataset_created = original_files.save_original_file(conn, dataset_label, dataset_path, mtype,
int(request.GET.get('size', default=-1)),
request.GET.get('sha1', default='UNKNOWN'),
error_on_duplicated)
return HttpResponse(
json.dumps({
'omero_id': dataset_id,
'created': dataset_created,
'mimetype': mtype,
'label': dataset_label,
'path': dataset_path
}),
content_type='application/json'
)
except datasets_files.DatasetFormatError as dfe:
return HttpResponseServerError('{0}'.format(dfe))
except DuplicatedEntryError as dee:
return HttpResponseServerError('{0}'.format(dee))
except datasets_files.InvalidDatasetPath as idp:
return HttpResponseServerError('{0}'.format(idp))
except settings.ServerConfigError as sce:
return HttpResponseServerError('{0}'.format(sce))
@login_required()
def get_original_file_infos(request, file_name, conn=None, **kwargs):
fmtype = request.GET.get('mimetype')
if fmtype is None:
return HttpResponseServerError('Missing mandatory mimetype value to complete the request')
details = original_files.get_original_file_infos(conn, file_name, fmtype)
return HttpResponse(json.dumps(details), content_type='application/json')
@login_required()
def delete_original_file(request, file_name, conn=None, **kwargs):
fmtype = request.GET.get('mimetype')
if fmtype is None:
return HttpResponseServerError('Missing mandatory mimetype value to complete the request')
status, count = original_files.delete_original_files(conn, file_name, fmtype)
return HttpResponse(json.dumps({'success': status, 'deleted_count': count}),
content_type='application/json')
@login_required()
def delete_original_files(request, file_name, conn=None, **kwargs):
status, count = original_files.delete_original_files(conn, file_name)
return HttpResponse(json.dumps({'success': status, 'deleted_count': count}),
content_type='application/json')
def _get_dataset_dzi_description(original_file):
if original_file and original_file.mimetype == 'dataset-folder/tiledb':
dzi_adapter = DZIAdapterFactory('TILEDB').get_adapter(original_file.name)
return dzi_adapter.get_dzi_description()
else:
return None
@login_required()
def get_array_dataset_dzi_by_label(request, dataset_label, conn=None, **kwargs):
try:
original_file = get_original_file(conn, dataset_label)
except DuplicatedEntryError as de_err:
return HttpResponseServerError(str(de_err))
dzi_metadata = _get_dataset_dzi_description(original_file)
if dzi_metadata is not None:
return HttpResponse(dzi_metadata, content_type='application/xml')
else:
        return HttpResponseNotFound(f'There is no valid array dataset with label {dataset_label}')
@login_required()
def get_array_dataset_dzi_by_id(request, dataset_id, conn=None, **kwargs):
original_file = get_original_file_by_id(conn, dataset_id)
dzi_metadata = _get_dataset_dzi_description(original_file)
if dzi_metadata is not None:
return HttpResponse(dzi_metadata, content_type='application/xml')
else:
        return HttpResponseNotFound(f'There is no valid array dataset with ID {dataset_id}')
def _get_tile_from_dataset(original_file, level, row, column, color_palette, threshold):
if original_file and original_file.mimetype == 'dataset-folder/tiledb':
dzi_adapter = DZIAdapterFactory('TILEDB').get_adapter(original_file.name)
return dzi_adapter.get_tile(level, int(row), int(column), color_palette, threshold)
else:
return None
@login_required()
def get_array_dataset_tile_by_label(request, dataset_label, level, row, column, conn=None, **kwargs):
color_palette = request.GET.get('palette')
threshold = request.GET.get('threshold')
if color_palette is None:
return HttpResponseBadRequest('Missing mandatory palette value to complete the request')
try:
original_file = get_original_file(conn, dataset_label)
tile = _get_tile_from_dataset(original_file, level, row, column, color_palette, threshold)
if tile:
response = HttpResponse(content_type='image/png')
tile.save(response, 'png')
return response
else:
            return HttpResponseNotFound(f'There is no valid array dataset with label {dataset_label}')
except DuplicatedEntryError as de_err:
return HttpResponseServerError(str(de_err))
except InvalidColorPalette as cp_error:
return HttpResponseBadRequest(cp_error)
except InvalidAttribute as a_error:
return HttpResponseBadRequest(a_error)
@login_required()
def get_array_dataset_tile_by_id(request, dataset_id, level, row, column, conn=None, **kwargs):
color_palette = request.GET.get('palette')
threshold = request.GET.get('threshold')
if color_palette is None:
return HttpResponseBadRequest('Missing mandatory palette value to complete the request')
try:
original_file = get_original_file_by_id(conn, dataset_id)
tile = _get_tile_from_dataset(original_file, level, row, column, color_palette, threshold)
if tile:
response = HttpResponse(content_type='image/png')
tile.save(response, 'png')
return response
else:
            return HttpResponseNotFound(f'There is no valid array dataset with ID {dataset_id}')
except InvalidColorPalette as cp_error:
return HttpResponseBadRequest(cp_error)
except InvalidAttribute as a_error:
return HttpResponseBadRequest(a_error)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/test_strings.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that we have proper strings like Copyright notices on all the
right files in our distributions.
Note that this is a source file and packaging test, not a functional test,
so the name of this script doesn't end in *Tests.py.
"""
import fnmatch
import os
import os.path
import re
import TestCmd
import TestSCons
# Use TestCmd, not TestSCons, so we don't chdir to a temporary directory.
test = TestCmd.TestCmd()
scons_version = TestSCons.SConsVersion
def build_path(*args):
return os.path.join('build', *args)
build_scons = build_path('scons')
build_local = build_path('scons-local', 'scons-local-'+scons_version)
build_src = build_path('scons-src')
class Checker(object):
def __init__(self, directory,
search_list = [],
remove_list = [],
remove_patterns = []):
self.directory = directory
self.search_list = search_list
self.remove_dict = {}
for r in remove_list:
self.remove_dict[os.path.join(directory, r)] = 1
self.remove_patterns = remove_patterns
def directory_exists(self):
return os.path.exists(self.directory)
def remove_this(self, name, path):
if self.remove_dict.get(path):
return 1
else:
for pattern in self.remove_patterns:
if fnmatch.fnmatch(name, pattern):
return 1
return 0
def search_this(self, path):
if self.search_list:
for pattern in self.search_list:
if fnmatch.fnmatch(path, pattern):
return 1
return None
else:
return os.path.isfile(path)
def find_missing(self):
result = []
for dirpath, dirnames, filenames in os.walk(self.directory):
if '.svn' in dirnames:
dirnames.remove('.svn')
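            # Iterate over a copy (dirnames[:]) so that entries can be pruned
            # from the live dirnames list; os.walk then skips the pruned
            # directories entirely.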
for dname in dirnames[:]:
dpath = os.path.join(dirpath, dname)
if self.remove_this(dname, dpath):
dirnames.remove(dname)
for fname in filenames:
fpath = os.path.join(dirpath, fname)
if self.search_this(fpath) and not self.remove_this(fname, fpath):
body = open(fpath, 'r').read()
for expr in self.expressions:
if not expr.search(body):
msg = '%s: missing %s' % (fpath, repr(expr.pattern))
result.append(msg)
return result
class CheckUnexpandedStrings(Checker):
expressions = [
re.compile('Copyright (c) 2001 - 2016 The SCons Foundation'),
re.compile('src/test_strings.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog'),
]
def must_be_built(self):
return None
class CheckPassTest(Checker):
expressions = [
re.compile(r'\.pass_test()'),
]
def must_be_built(self):
return None
class CheckExpandedCopyright(Checker):
expressions = [
re.compile('Copyright.*The SCons Foundation'),
]
def must_be_built(self):
return 1
check_list = [
CheckUnexpandedStrings(
'src',
search_list = [ '*.py' ],
remove_list = [
'engine/SCons/compat/_scons_sets.py',
'engine/SCons/compat/_scons_subprocess.py',
'engine/SCons/Conftest.py',
'engine/SCons/dblite.py',
],
),
CheckUnexpandedStrings(
'test',
search_list = [ '*.py' ],
),
CheckPassTest(
'test',
search_list = [ '*.py' ],
remove_list = [
'Fortran/common.py',
],
),
CheckExpandedCopyright(
build_scons,
remove_list = [
'build',
'build-stamp',
'configure-stamp',
'debian',
'dist',
'gentoo',
'engine/SCons/compat/_scons_sets.py',
'engine/SCons/compat/_scons_subprocess.py',
'engine/SCons/Conftest.py',
'engine/SCons/dblite.py',
'MANIFEST',
'setup.cfg',
],
# We run epydoc on the *.py files, which generates *.pyc files.
remove_patterns = [
'*.pyc',
]
),
CheckExpandedCopyright(
build_local,
remove_list = [
'SCons/compat/_scons_sets.py',
'SCons/compat/_scons_subprocess.py',
'SCons/Conftest.py',
'SCons/dblite.py',
'scons-%s.egg-info' % scons_version,
],
),
CheckExpandedCopyright(
build_src,
remove_list = [
'bench/timeit.py',
'bin',
'config',
'debian',
'gentoo',
'doc/design',
'doc/MANIFEST',
'doc/python10',
'doc/reference',
'doc/developer/MANIFEST',
'doc/man/MANIFEST',
'doc/user/cons.pl',
'doc/user/MANIFEST',
'doc/user/SCons-win32-install-1.jpg',
'doc/user/SCons-win32-install-2.jpg',
'doc/user/SCons-win32-install-3.jpg',
'doc/user/SCons-win32-install-4.jpg',
'examples',
'gentoo',
'QMTest/classes.qmc',
'QMTest/configuration',
'QMTest/TestCmd.py',
'QMTest/TestCmdTests.py',
'QMTest/TestCommon.py',
'QMTest/TestCommonTests.py',
'src/MANIFEST.in',
'src/setup.cfg',
'src/engine/MANIFEST.in',
'src/engine/MANIFEST-xml.in',
'src/engine/setup.cfg',
'src/engine/SCons/compat/_scons_sets.py',
'src/engine/SCons/compat/_scons_subprocess.py',
'src/engine/SCons/Conftest.py',
'src/engine/SCons/dblite.py',
'src/script/MANIFEST.in',
'src/script/setup.cfg',
'test/Fortran/.exclude_tests',
'timings/changelog.html',
'timings/ElectricCloud/genscons.pl',
'timings/graph.html',
'timings/index.html',
'review.py',
],
remove_patterns = [
'*.js',
]
),
]
missing_strings = []
not_built = []
for collector in check_list:
if collector.directory_exists():
missing_strings.extend(collector.find_missing())
elif collector.must_be_built():
not_built.append(collector.directory)
if missing_strings:
print "Found the following files with missing strings:"
print "\t" + "\n\t".join(missing_strings)
test.fail_test(1)
if not_built:
print "Cannot check all strings, the following have apparently not been built:"
print "\t" + "\n\t".join(not_built)
test.no_result(1)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
import unittest
import os, sys, os.path, time, inspect
from filecmp import dircmp
from tempfile import mkdtemp
from shutil import rmtree, copy2
from zipfile import ZipFile
import mozunit
from JarMaker import JarMaker
if sys.platform == "win32":
import ctypes
from ctypes import POINTER, WinError
DWORD = ctypes.c_ulong
LPDWORD = POINTER(DWORD)
HANDLE = ctypes.c_void_p
GENERIC_READ = 0x80000000
FILE_SHARE_READ = 0x00000001
OPEN_EXISTING = 3
MAX_PATH = 260
class FILETIME(ctypes.Structure):
_fields_ = [("dwLowDateTime", DWORD),
("dwHighDateTime", DWORD)]
class BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("dwVolumeSerialNumber", DWORD),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("nNumberOfLinks", DWORD),
("nFileIndexHigh", DWORD),
("nFileIndexLow", DWORD)]
# http://msdn.microsoft.com/en-us/library/aa363858
CreateFile = ctypes.windll.kernel32.CreateFileA
CreateFile.argtypes = [ctypes.c_char_p, DWORD, DWORD, ctypes.c_void_p,
DWORD, DWORD, HANDLE]
CreateFile.restype = HANDLE
# http://msdn.microsoft.com/en-us/library/aa364952
GetFileInformationByHandle = ctypes.windll.kernel32.GetFileInformationByHandle
GetFileInformationByHandle.argtypes = [HANDLE, POINTER(BY_HANDLE_FILE_INFORMATION)]
GetFileInformationByHandle.restype = ctypes.c_int
# http://msdn.microsoft.com/en-us/library/aa364996
GetVolumePathName = ctypes.windll.kernel32.GetVolumePathNameA
GetVolumePathName.argtypes = [ctypes.c_char_p, ctypes.c_char_p, DWORD]
GetVolumePathName.restype = ctypes.c_int
# http://msdn.microsoft.com/en-us/library/aa364993
GetVolumeInformation = ctypes.windll.kernel32.GetVolumeInformationA
GetVolumeInformation.argtypes = [ctypes.c_char_p, ctypes.c_char_p, DWORD,
LPDWORD, LPDWORD, LPDWORD, ctypes.c_char_p,
DWORD]
GetVolumeInformation.restype = ctypes.c_int
def symlinks_supported(path):
if sys.platform == "win32":
# Add 1 for a trailing backslash if necessary, and 1 for the terminating
# null character.
volpath = ctypes.create_string_buffer(len(path) + 2)
rv = GetVolumePathName(path, volpath, len(volpath))
if rv == 0:
raise WinError()
fsname = ctypes.create_string_buffer(MAX_PATH + 1)
rv = GetVolumeInformation(volpath, None, 0, None, None, None, fsname,
len(fsname))
if rv == 0:
raise WinError()
# Return true only if the fsname is NTFS
return fsname.value == "NTFS"
else:
return True
def _getfileinfo(path):
"""Return information for the given file. This only works on Windows."""
fh = CreateFile(path, GENERIC_READ, FILE_SHARE_READ, None, OPEN_EXISTING, 0, None)
    # CreateFile signals failure with INVALID_HANDLE_VALUE (-1), not a NULL handle.
    if fh is None or fh == HANDLE(-1).value:
        raise WinError()
info = BY_HANDLE_FILE_INFORMATION()
rv = GetFileInformationByHandle(fh, info)
if rv == 0:
raise WinError()
return info
def is_symlink_to(dest, src):
if sys.platform == "win32":
# Check if both are on the same volume and have the same file ID
destinfo = _getfileinfo(dest)
srcinfo = _getfileinfo(src)
return (destinfo.dwVolumeSerialNumber == srcinfo.dwVolumeSerialNumber and
destinfo.nFileIndexHigh == srcinfo.nFileIndexHigh and
destinfo.nFileIndexLow == srcinfo.nFileIndexLow)
else:
# Read the link and check if it is correct
if not os.path.islink(dest):
return False
target = os.path.abspath(os.readlink(dest))
abssrc = os.path.abspath(src)
return target == abssrc
class _TreeDiff(dircmp):
"""Helper to report rich results on difference between two directories.
"""
def _fillDiff(self, dc, rv, basepath="%s"):
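        # basepath is a format string that accumulates the relative path of
        # the current subdirectory, so nested entries are reported as
        # "sub/dir/file" rather than bare file names.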
rv['right_only'] += map(lambda l: basepath % l, dc.right_only)
rv['left_only'] += map(lambda l: basepath % l, dc.left_only)
rv['diff_files'] += map(lambda l: basepath % l, dc.diff_files)
rv['funny'] += map(lambda l: basepath % l, dc.common_funny)
rv['funny'] += map(lambda l: basepath % l, dc.funny_files)
for subdir, _dc in dc.subdirs.iteritems():
self._fillDiff(_dc, rv, basepath % (subdir + "/%s"))
def allResults(self, left, right):
rv = {'right_only':[], 'left_only':[],
'diff_files':[], 'funny': []}
self._fillDiff(self, rv)
chunks = []
if rv['right_only']:
chunks.append('%s only in %s' % (', '.join(rv['right_only']),
right))
if rv['left_only']:
chunks.append('%s only in %s' % (', '.join(rv['left_only']),
left))
if rv['diff_files']:
chunks.append('%s differ' % ', '.join(rv['diff_files']))
if rv['funny']:
chunks.append("%s don't compare" % ', '.join(rv['funny']))
return '; '.join(chunks)
class TestJarMaker(unittest.TestCase):
"""
Unit tests for JarMaker.py
"""
    debug = False # set to True to debug failing tests on disk
def setUp(self):
self.tmpdir = mkdtemp()
self.srcdir = os.path.join(self.tmpdir, 'src')
os.mkdir(self.srcdir)
self.builddir = os.path.join(self.tmpdir, 'build')
os.mkdir(self.builddir)
self.refdir = os.path.join(self.tmpdir, 'ref')
os.mkdir(self.refdir)
self.stagedir = os.path.join(self.tmpdir, 'stage')
os.mkdir(self.stagedir)
def tearDown(self):
if self.debug:
print self.tmpdir
else:
rmtree(self.tmpdir)
def _jar_and_compare(self, *args, **kwargs):
jm = JarMaker(outputFormat='jar')
kwargs['jardir'] = os.path.join(self.builddir, 'chrome')
if 'topsourcedir' not in kwargs:
kwargs['topsourcedir'] = self.srcdir
jm.makeJars(*args, **kwargs)
cwd = os.getcwd()
os.chdir(self.builddir)
try:
# expand build to stage
for path, dirs, files in os.walk('.'):
stagedir = os.path.join(self.stagedir, path)
if not os.path.isdir(stagedir):
os.mkdir(stagedir)
for file in files:
if file.endswith('.jar'):
# expand jar
stagepath = os.path.join(stagedir, file)
os.mkdir(stagepath)
zf = ZipFile(os.path.join(path, file))
# extractall is only in 2.6, do this manually :-(
for entry_name in zf.namelist():
segs = entry_name.split('/')
fname = segs.pop()
dname = os.path.join(stagepath, *segs)
if not os.path.isdir(dname):
os.makedirs(dname)
if not fname:
# directory, we're done
continue
_c = zf.read(entry_name)
open(os.path.join(dname, fname), 'wb').write(_c)
zf.close()
else:
copy2(os.path.join(path, file), stagedir)
# compare both dirs
os.chdir('..')
td = _TreeDiff('ref', 'stage')
return td.allResults('reference', 'build')
finally:
os.chdir(cwd)
def _create_simple_setup(self):
# create src content
jarf = open(os.path.join(self.srcdir, 'jar.mn'), 'w')
jarf.write('''test.jar:
dir/foo (bar)
''')
jarf.close()
open(os.path.join(self.srcdir,'bar'),'w').write('content\n')
# create reference
refpath = os.path.join(self.refdir, 'chrome', 'test.jar', 'dir')
os.makedirs(refpath)
open(os.path.join(refpath, 'foo'), 'w').write('content\n')
def test_a_simple_jar(self):
'''Test a simple jar.mn'''
self._create_simple_setup()
# call JarMaker
rv = self._jar_and_compare((os.path.join(self.srcdir,'jar.mn'),),
tuple(),
sourcedirs = [self.srcdir])
self.assertTrue(not rv, rv)
def test_a_simple_symlink(self):
'''Test a simple jar.mn with a symlink'''
if not symlinks_supported(self.srcdir):
return
self._create_simple_setup()
jm = JarMaker(outputFormat='symlink')
kwargs = {
'sourcedirs': [self.srcdir],
'topsourcedir': self.srcdir,
'jardir': os.path.join(self.builddir, 'chrome'),
}
jm.makeJars((os.path.join(self.srcdir,'jar.mn'),), tuple(), **kwargs)
# All we do is check that srcdir/bar points to builddir/chrome/test/dir/foo
srcbar = os.path.join(self.srcdir, 'bar')
destfoo = os.path.join(self.builddir, 'chrome', 'test', 'dir', 'foo')
self.assertTrue(is_symlink_to(destfoo, srcbar),
"%s is not a symlink to %s" % (destfoo, srcbar))
def test_k_multi_relative_jar(self):
'''Test the API for multiple l10n jars, with different relative paths'''
# create app src content
def _mangle(relpath):
'method we use to map relpath to srcpaths'
return os.path.join(self.srcdir, 'other-' + relpath)
jars = []
for relpath in ('foo', 'bar'):
ldir = os.path.join(self.srcdir, relpath, 'locales')
os.makedirs(ldir)
jp = os.path.join(ldir, 'jar.mn')
jars.append(jp)
open(jp, 'w').write('''ab-CD.jar:
% locale app ab-CD %app
app/''' + relpath + ' (%' + relpath + ''')
''')
ldir = _mangle(relpath)
os.mkdir(ldir)
open(os.path.join(ldir, relpath), 'w').write(relpath+" content\n")
# create reference
mf = open(os.path.join(self.refdir, 'chrome.manifest'), 'w')
mf.write('manifest chrome/ab-CD.manifest\n')
mf.close()
chrome_ref = os.path.join(self.refdir, 'chrome')
os.mkdir(chrome_ref)
mf = open(os.path.join(chrome_ref, 'ab-CD.manifest'), 'wb')
mf.write('locale app ab-CD jar:ab-CD.jar!/app\n')
mf.close()
ldir = os.path.join(chrome_ref, 'ab-CD.jar', 'app')
os.makedirs(ldir)
for relpath in ('foo', 'bar'):
open(os.path.join(ldir, relpath), 'w').write(relpath+" content\n")
# call JarMaker
difference = self._jar_and_compare(jars,
(_mangle,),
sourcedirs = [])
self.assertTrue(not difference, difference)
if __name__ == '__main__':
mozunit.main()
|
|
# -*- coding: utf-8 -*-
__author__ = 'breddels'
import sys
import vaex
from vaex.ui.qt import *
import vaex.ui.qt as dialogs
import astropy.units
import astropy.io.votable.ucd
import logging
from vaex.ui.icons import iconfile
import vaex.ui.completer
logger = logging.getLogger("vaex.ui.columns")
completerContents = "blaat schaap aap koe".split()
words = astropy.io.votable.ucd.UCDWords()
ucd_words = list(words._primary.union(words._secondary))
ucd_words.sort()
class ColumnsTableModel(QtCore.QAbstractTableModel):
def __init__(self, dataset, parent=None, *args):
"""
:type dataset: Dataset
"""
QtCore.QAbstractTableModel.__init__(self, parent, *args)
self.dataset = dataset
self.row_count_start = 1
self.table_column_names = ["Visible", "Name", "Type", "Units", "UCD", "Description", "Expression"]
self.show_virtual = True
def get_dataset_column_names(self):
return self.dataset.get_column_names(virtual=self.show_virtual, hidden=True, strings=True)
def rowCount(self, parent):
column_names = self.get_dataset_column_names()
return len(column_names)
def columnCount(self, parent):
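        # One extra leading column is reserved for the row number; the
        # remaining columns map to table_column_names via index.column() - 1.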
return len(self.table_column_names) + 1
def setData(self, index, value, role=QtCore.Qt.EditRole):
row = index.row()
column_index = index.column() - 1
column_name = self.get_dataset_column_names()[row]
property = self.table_column_names[column_index]
# print index, value, role
if property == "Visible":
logger.debug("set visibility to: %s", value == QtCore.Qt.Checked)
if property == "Description":
self.dataset.descriptions[column_name] = value
if property == "UCD":
self.dataset.ucds[column_name] = value
# TODO: move to dataset class
self.dataset.signal_column_changed.emit(self.dataset, column_name, "change")
if property == "Units":
if value:
try:
unit = astropy.units.Unit(value)
logger.debug("setting unit to: %s (%s)" % (value, unit))
self.dataset.units[column_name] = unit
# TODO: move to dataset class
self.dataset.signal_column_changed.emit(self.dataset, column_name, "change")
except Exception as e:
dialogs.dialog_error(None, "Cannot parse unit", "Cannot parse unit:\n %s" % e)
else:
if column_name in self.dataset.units:
del self.dataset.units[column_name]
if property == "Expression":
try:
self.dataset.validate_expression(value)
except Exception as e:
dialogs.dialog_error(None, "Invalid expression", "Invalid expression: %s" % e)
            # although it may not be a valid expression, still set it so the user can edit it
self.dataset.virtual_columns[column_name] = value
self.dataset.write_meta()
return True
def data(self, index, role=QtCore.Qt.DisplayRole):
# row_offset = self.get_row_offset()
# print index, role
if not index.isValid():
return None
if role == QtCore.Qt.CheckStateRole and index.column() == 1:
row = index.row()
column_name = self.get_dataset_column_names()[row]
return QtCore.Qt.Checked if not column_name.startswith("__") else QtCore.Qt.Unchecked
elif role not in [QtCore.Qt.DisplayRole, QtCore.Qt.EditRole]:
return None
if index.column() == 0:
# return "{:,}".format(index.row()+self.row_count_start + row_offset)
return str(index.row() + self.row_count_start)
else:
row = index.row()
column_index = index.column() - 1
column_name = self.get_dataset_column_names()[row]
property = self.table_column_names[column_index]
column = None
# if column_name in self.dataset.get_column_names():
# column = self.dataset.columns[column_name]
# if property == "Visible":
# return QtCore.Qt.Checked
if property == "Name":
return column_name
elif property == "Type":
if column_name in self.dataset.get_column_names(strings=True):
dtype = self.dataset.data_type(column_name)
return dtype.name
# return str(self.dataset.data_type(column_name))
else:
return "virtual column"
elif property == "Units":
unit = self.dataset.unit(column_name)
return str(unit) if unit else ""
elif property == "UCD":
return self.dataset.ucds.get(column_name, "")
elif property == "Description":
return self.dataset.descriptions.get(column_name, "")
elif property == "Expression":
return self.dataset.virtual_columns.get(column_name, "")
def flags(self, index):
row = index.row()
column_index = index.column() - 1
if column_index == 0:
return QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
column_name = self.get_dataset_column_names()[row]
property = self.table_column_names[column_index]
column = None
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
if property in ["Description", "Units", "UCD"]:
flags |= QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable
if column_name in self.dataset.virtual_columns:
flags |= QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable
return flags
def headerData(self, index, orientation, role):
# row_offset = self.get_row_offset()
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
if index == 0:
return ""
else:
return self.table_column_names[index - 1]
# if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
# return str(index+self.row_count_start + row_offset)
return None
def insertRows(self, *args):
return True
class ColumnsTable(QtGui.QWidget):
def set_dataset(self, dataset):
if self.event_handler:
self.dataset.signal_column_changed.disconnect(self.event_handler)
self.dataset = dataset
self.tableModel = ColumnsTableModel(self.dataset, self)
self.tableView.setModel(self.tableModel)
self.tableView.selectionModel().currentChanged.connect(self.onCurrentChanged)
self.tableView.resizeColumnsToContents()
# self.tableView.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch);
self.tableView.horizontalHeader().setStretchLastSection(True)
self.event_handler = self.dataset.signal_column_changed.connect(self.on_column_change)
def on_column_change(self, *args):
self.reset()
pass
def __init__(self, parent, menu=None):
super(ColumnsTable, self).__init__(parent)
# dataset.add_virtual_column("xp", "x")
self.event_handler = None
self.resize(700, 500)
self.tableView = QtGui.QTableView()
# self.tableView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows);
# self.header = self.dataset.column_names
# self.tableView.pressed.connect(self.onSelectRow)
if qt_mayor == 5:
self.tableView.verticalHeader().setSectionResizeMode(QtGui.QHeaderView.Interactive)
else:
self.tableView.verticalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
self.unit_delegate = vaex.ui.completer.UnitDelegate(self.tableView)
self.ucd_delegate = vaex.ui.completer.UCDDelegate(self.tableView)
self.tableView.setItemDelegateForColumn(4, self.unit_delegate)
self.tableView.setItemDelegateForColumn(5, self.ucd_delegate)
self.toolbar = QtGui.QToolBar(self)
# self.description = QtGui.QTextEdit(self.dataset.description, self)
# self.description.setFixedHeight(100)
# self.description.textChanged.connect(self.onTextChanged)
# self.action_group_add = QtGui.QActionGroup(self)
self.action_add = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Add virtual column', self)
self.action_remove = QtGui.QAction(QtGui.QIcon(iconfile('table-delete-column')), 'Remove virtual column', self)
self.action_remove.setEnabled(False)
# self.action_add.setShortcut("Ctrl++")
self.action_remove.setShortcut("Ctrl+-")
self.toolbar.addAction(self.action_add)
self.toolbar.addAction(self.action_remove)
self.action_add_menu = QtGui.QMenu()
self.action_add.setMenu(self.action_add_menu)
self.action_normal = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Add virtual column', self)
self.action_normal.setShortcut("Ctrl++")
self.action_add.menu().addAction(self.action_normal)
self.action_normal.triggered.connect(self.onAdd)
self.action_celestial = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Equatorial to galactic', self)
self.action_celestial.setShortcut("Ctrl+G")
self.action_add.menu().addAction(self.action_celestial)
self.action_celestial.triggered.connect(lambda *args: add_celestial(self, self.dataset))
self.action_eq2ecl = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Equatorial to ecliptic', self)
# self.action_eq2ecl.setShortcut("Ctrl+G")
self.action_add.menu().addAction(self.action_eq2ecl)
self.action_eq2ecl.triggered.connect(lambda *args: add_celestial_eq2ecl(self, self.dataset))
self.action_car_to_gal = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Cartesian to galactic', self)
self.action_car_to_gal.setShortcut("Ctrl+S")
self.action_add.menu().addAction(self.action_car_to_gal)
self.action_car_to_gal.triggered.connect(lambda *args: add_sky(self, self.dataset, True))
self.action_par_to_dis = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Parallax to distance', self)
self.action_par_to_dis.setShortcut("Ctrl+D")
self.action_add.menu().addAction(self.action_par_to_dis)
self.action_par_to_dis.triggered.connect(lambda *args: add_distance(self, self.dataset))
self.action_gal_to_car = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Galactic to cartesian', self)
self.action_gal_to_car.setShortcut("Ctrl+C")
self.action_add.menu().addAction(self.action_gal_to_car)
self.action_gal_to_car.triggered.connect(lambda *args: add_cartesian(self, self.dataset, True))
self.action_gal_to_aitoff = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Galactic to Aitoff projection', self)
self.action_gal_to_aitoff.setShortcut("Ctrl+A")
self.action_add.menu().addAction(self.action_gal_to_aitoff)
self.action_gal_to_aitoff.triggered.connect(lambda *args: add_aitoff(self, self.dataset, True))
self.action_eq2gal_pm = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Equatorial to galactic proper motion', self)
# self.action_gal_to_aitoff.setShortcut("Ctrl+A")
self.action_add.menu().addAction(self.action_eq2gal_pm)
self.action_eq2gal_pm.triggered.connect(lambda *args: add_proper_motion_eq2gal(self, self.dataset))
# action_group_add.add(self.action_add)
self.action_add.triggered.connect(self.onAdd)
self.action_remove.triggered.connect(self.onRemove)
if menu:
menu.addAction(self.action_add)
menu.addAction(self.action_remove)
# self.tableView.pressed.connect(self.onSelectRow)
# self.tableView.activated.connect(self.onActivateRow)
# self.tableView.selectionModel().currentChanged.connect(self.onCurrentChanged)
self.boxlayout = QtGui.QVBoxLayout(self)
self.boxlayout.addWidget(self.toolbar, 0)
# self.boxlayout.addWidget(self.description, 0)
self.boxlayout.addWidget(self.tableView, 1)
self.setLayout(self.boxlayout)
self.tableView.resizeColumnsToContents()
# self.tableView.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch);
self.tableView.horizontalHeader().setStretchLastSection(True)
def onTextChanged(self, *args):
self.dataset.description = self.description.toPlainText()
logger.debug("setting description to: %s", self.dataset.description)
self.dataset.write_meta()
def onSelectRow(self, model):
row_index = model.row()
logger.debug("row index selected %d" % row_index)
def onCurrentChanged(self, model, previous):
# row_index = model.row()
# logger.debug("row index activated %d" % row_index)
self.check_remove()
def check_remove(self):
model = self.tableView.selectionModel().currentIndex()
column_names = self.tableModel.get_dataset_column_names()
column_name = column_names[model.row()]
self.action_remove.setEnabled(column_name in self.dataset.virtual_columns)
def onRemove(self, _=None):
model = self.tableView.selectionModel().currentIndex()
column_names = self.tableModel.get_dataset_column_names()
column_name = column_names[model.row()]
logger.debug("removing %s", column_name)
# del self.dataset.virtual_columns[column_name]
self.dataset.delete_virtual_column(column_name)
# self.reset()
self.check_remove()
def reset(self):
self.tableModel.beginResetModel()
self.tableView.reset()
self.tableModel.endResetModel()
def onAdd(self, _=None):
dialog = QuickDialog(self, title="Add virtual column")
dialog.add_text("name", "Column name", make_unique("user", self.dataset))
dialog.add_expression("expression", "Expression", "sqrt(%s)" % self.dataset.get_column_names()[0], self.dataset)
# dialog.add_unit("unit", "Expression", "sqrt(%s)" % self.dataset.get_column_names()[0], self.dataset)
dialog.add_ucd("ucd", "UCD", "")
dialog.add_text("description", "Description", placeholder="Enter a description")
values = dialog.get()
if values:
if values["description"]:
self.dataset.descriptions[values["name"]] = values["description"]
if values["ucd"]:
self.dataset.ucds[values["name"]] = values["ucd"]
self.dataset.add_virtual_column(values["name"], values["expression"])
def onAddCelestial(self, *args):
add_celestial(self, self.dataset)
def add_celestial_eq2ecl(parent, dataset):
add_celestial(parent, dataset, type="ecliptic")
def add_celestial(parent, dataset, type="galactic"):
result = dataset.ucd_find(["^pos.eq.ra", "^pos.eq.dec"])
column_names = dataset.get_column_names(virtual=True)
if result is None:
result = ["", ""]
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and result is not None:
values = dict(ra=result[0], dec=result[1], l="l", b="b", degrees="degrees")
else:
dialog = QuickDialog(parent, title="Celestial transform: equatorial to %s" % type)
# dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
logger.debug("unit = %s", dataset.unit(column_names[0], default=astropy.units.deg))
logger.debug("unit = %s", dataset.unit(column_names[0], default=astropy.units.deg) == astropy.units.rad)
radians = (dataset.unit(result[0], default=astropy.units.deg) == astropy.units.rad)
if radians:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"][::-1])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
dialog.add_expression("ra", "Right ascension", result[0], dataset)
dialog.add_expression("dec", "Declination", result[1], dataset)
if type == "galactic":
dialog.add_text("l", "Galactic l", "l")
dialog.add_text("b", "Galactic b", "b")
else:
dialog.add_text("l", "Ecliptic ra", "lambda_")
dialog.add_text("b", "Ecliptic dec", "beta")
values = dialog.get()
if values:
dataset.ucds[values["l"]] = "pos.%s.lon" % type
dataset.ucds[values["b"]] = "pos.%s.lat" % type
dataset.units[values["l"]] = astropy.units.deg if values["degrees"] == "degrees" else astropy.units.rad
dataset.units[values["b"]] = astropy.units.deg if values["degrees"] == "degrees" else astropy.units.rad
if type == "galactic":
dataset.add_virtual_columns_celestial(long_in=values["ra"], lat_in=values["dec"],
long_out=values["l"], lat_out=values["b"],
radians=values["degrees"] == "radians")
else:
dataset.add_virtual_columns_eq2ecl(long_in=values["ra"], lat_in=values["dec"],
long_out=values["l"], lat_out=values["b"],
radians=values["degrees"] == "radians")
def add_distance(parent, dataset):
parallax = dataset.ucd_find(["pos.parallax"])
column_names = dataset.get_column_names(virtual=True)
if parallax is None:
parallax = ""
unit = dataset.unit(parallax)
distance_name = make_unique("distance", dataset)
if unit:
convert = unit.to(astropy.units.mas)
distance_expression = "%f/(%s)" % (convert, parallax)
else:
distance_expression = "1/(%s)" % (parallax)
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and parallax is not None:
values = dict(distance=distance_name, parallax=parallax)
else:
dialog = QuickDialog(parent, title="Parallax to distance transform")
# dialog.add_combo("parallax", "Input in", ["degrees", "radians"])
dialog.add_expression("parallax", "Parallax", parallax, dataset)
dialog.add_text("distance", "Distance name", distance_name)
values = dialog.get()
if values:
dataset.ucds[values["distance"]] = "pos.distance"
if unit:
            if unit == astropy.units.milliarcsecond:
                dataset.units[values["distance"]] = astropy.units.kpc
            if unit == astropy.units.arcsecond:
                dataset.units[values["distance"]] = astropy.units.parsec
dataset.add_virtual_column(values["distance"], distance_expression)
def add_cartesian(parent, dataset, galactic=True):
if galactic:
spherical = [dataset.ucd_find(["pos.distance"]), dataset.ucd_find(["pos.galactic.lon"]), dataset.ucd_find(["pos.galactic.lat"])]
else:
spherical = [dataset.ucd_find(["pos.distance"]), dataset.ucd_find(["pos.eq.ra"]), dataset.ucd_find(["pos.eq.dec"])]
column_names = dataset.get_column_names(virtual=True)
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and None not in spherical:
values = dict(alpha=spherical[1], delta=spherical[2], distance=spherical[0], x="x", y="y", z="z",
degrees="degrees", solar_pos=repr(default_solar_position)
)
else:
dialog = QuickDialog(parent, title="Spherical to cartesian transform")
if spherical[1]:
radians = dataset.unit(spherical[1], default=astropy.units.deg) == astropy.units.rad
if radians:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"][::-1])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
dialog.add_expression("distance", "Distance", spherical[0], dataset)
dialog.add_expression("alpha", "Alpha", spherical[1], dataset)
dialog.add_expression("delta", "Delta", spherical[2], dataset)
# TODO: 8 should be in proper units
dialog.add_combo_edit("solar_pos", "Solar position (x,y,z)", repr(default_solar_position), column_names)
dialog.add_text("x", "x", make_unique("x", dataset))
dialog.add_text("y", "y", make_unique("y", dataset))
dialog.add_text("z", "z", make_unique("z", dataset))
values = dialog.get()
if values:
pos = "pos.galactocentric" if galactic else "pos.heliocentric"
if 0:
units = dataset.unit(values["distance"])
if units:
dataset.units[values["x"]] = units
dataset.units[values["y"]] = units
dataset.units[values["z"]] = units
dataset.ucds[values["x"]] = "pos.cartesian.x;%s" % pos
dataset.ucds[values["y"]] = "pos.cartesian.y;%s" % pos
dataset.ucds[values["z"]] = "pos.cartesian.z;%s" % pos
solar_position = eval(values["solar_pos"])
dataset.add_virtual_columns_spherical_to_cartesian(values["alpha"], values["delta"], values["distance"],
values["x"], values["y"], values["z"],
center=solar_position,
radians=values["degrees"] == "radians")
def add_cartesian_velocities(parent, dataset, galactic=True):
if galactic:
ucds = ["pos.distance", "^pos.galactic.lon", "^pos.galactic.lat", "pos.pm;pos.galactic.lon", "pos.pm;pos.galactic.lat", "spect.dopplerVeloc"]
else:
raise NotImplementedError("is this useful?")
spherical = [dataset.ucd_find([ucd]) for ucd in ucds]
column_names = dataset.get_column_names(virtual=True)
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and None not in spherical:
values = dict(alpha=spherical[1], delta=spherical[2], distance=spherical[0], pm_alpha=spherical[3], pm_delta=spherical[4], vr=spherical[5],
degrees="degrees", solar_pos=repr(default_solar_position)
)
else:
dialog = QuickDialog(parent, title="Spherical motion to cartesian velocity")
if spherical[1]:
radians = dataset.unit(spherical[1], default=astropy.units.deg) == astropy.units.rad
if radians:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"][::-1])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
dialog.add_expression("distance", "Distance", spherical[0], dataset)
dialog.add_expression("alpha", "Alpha", spherical[1], dataset)
dialog.add_expression("delta", "Delta", spherical[2], dataset)
dialog.add_expression("pm_alpha", "pm_Alpha*", spherical[3], dataset)
dialog.add_expression("pm_delta", "pm_Delta", spherical[4], dataset)
dialog.add_expression("vr", "radial velocity", spherical[5], dataset)
# TODO: 8 should be in proper units
dialog.add_combo_edit("solar_velocity", "Solar velocity (vx,vy,vz)", default_solar_velocity, column_names)
dialog.add_text("vx", "vx_gal", make_unique("vx_gal", dataset))
dialog.add_text("vy", "vy_gal", make_unique("vy_gal", dataset))
dialog.add_text("vz", "vz_gal", make_unique("vz_gal", dataset))
values = dialog.get()
if values:
pos = "pos.galactocentric" if galactic else "pos.heliocentric"
if 0:
units = dataset.unit(values["distance"])
if units:
dataset.units[values["x"]] = units
dataset.units[values["y"]] = units
dataset.units[values["z"]] = units
dataset.ucds[values["vx"]] = "phys.veloc;pos.cartesian.x;%s" % pos
dataset.ucds[values["vy"]] = "phys.veloc;pos.cartesian.y;%s" % pos
dataset.ucds[values["vz"]] = "phys.veloc;pos.cartesian.z;%s" % pos
solar_velocity = eval(values["solar_velocity"])
dataset.add_virtual_columns_lbrvr_proper_motion2vcartesian(values["alpha"], values["delta"], values["distance"],
values["pm_alpha"], values["pm_delta"], values["vr"],
values["vx"], values["vy"], values["vz"],
center_v=solar_velocity,
radians=values["degrees"] == "radians")
def make_unique(name, dataset):
postfix = ""
number = 2
original_name = name
while name in dataset.get_column_names(virtual=True):
name = original_name + "_" + str(number)
number += 1
return name
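# Minimal sketch (illustrative only) of make_unique's behaviour defined above:
# the name is suffixed with _2, _3, ... until it no longer collides with an
# existing (virtual) column. The stub dataset is an assumption for demonstration.
def _example_make_unique():
	class _StubDataset(object):
		def get_column_names(self, virtual=False):
			return ["x", "x_2"]
	return make_unique("x", _StubDataset())  # -> "x_3"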
default_solar_position = (-8, 0, 0)
default_solar_velocity = "(10., 220+5.2, 7.2)"
def add_sky(parent, dataset, galactic=True):
if galactic:
pos = "pos.galactocentric"
else:
pos = "pos.heliocentric"
cartesian = [dataset.ucd_find(["pos.cartesian.x;%s" % pos]), dataset.ucd_find(["pos.cartesian.y;%s" % pos]),
dataset.ucd_find(["pos.cartesian.z;%s" % pos])]
column_names = dataset.get_column_names(virtual=True)
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and None not in cartesian:
values = dict(x=cartesian[0], y=cartesian[1], z=cartesian[2],
alpha=make_unique("l" if galactic else "alpha", dataset),
delta=make_unique("b" if galactic else "delta", dataset),
distance=make_unique("distance", dataset),
solar_pos=repr(default_solar_position),
degrees="degrees")
else:
dialog = QuickDialog(parent, title="Cartesian to spherical transform")
dialog.add_expression("x", "x", cartesian[0], dataset)
dialog.add_expression("y", "y", cartesian[1], dataset)
dialog.add_expression("z", "z", cartesian[2], dataset)
# TODO: 8 should be in proper units
dialog.add_combo_edit("solar_pos", "Solar position (x,y,z)", repr(default_solar_position), [])
dialog.add_combo("degrees", "Output in", ["degrees", "radians"])
dialog.add_text("distance", "Distance", make_unique("distance", dataset))
dialog.add_text("alpha", "Alpha", make_unique("l" if galactic else "alpha", dataset))
dialog.add_text("delta", "Delta", make_unique("b" if galactic else "delta", dataset))
values = dialog.get()
if values:
units = dataset.unit(values["x"])
pos = "pos.galactocentric" if galactic else "pos.heliocentric"
dataset.units[values["alpha"]] = astropy.units.deg if values["degrees"] == "degrees" else astropy.units.rad
dataset.units[values["delta"]] = astropy.units.deg if values["degrees"] == "degrees" else astropy.units.rad
if units:
dataset.units[values["distance"]] = units
dataset.ucds[values["distance"]] = "pos.distance;%s" % pos
dataset.ucds[values["alpha"]] = "pos.galactic.lon" if galactic else "pos.eq.ra"
dataset.ucds[values["delta"]] = "pos.galactic.lat" if galactic else "pos.eq.dec"
solar_position = eval(values["solar_pos"])
dataset.add_virtual_columns_cartesian_to_spherical(values["x"], values["y"], values["z"],
values["alpha"], values["delta"], values["distance"],
radians=values["degrees"] == "radians", center=solar_position)
def add_aitoff(parent, dataset, galactic=True):
if galactic:
spherical = [dataset.ucd_find(["pos.galactic.lon"]), dataset.ucd_find(["pos.galactic.lat"])]
else:
spherical = [dataset.ucd_find(["pos.eq.ra"]), dataset.ucd_find(["pos.eq.dec"])]
column_names = dataset.get_column_names(virtual=True)
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and None not in spherical:
values = dict(alpha=spherical[0], delta=spherical[1], x="x_aitoff", y="y_aitoff", degrees="degrees")
else:
dialog = QuickDialog(parent, title="Spherical to cartesian transform")
if spherical[1]:
radians = dataset.unit(spherical[1], default=astropy.units.deg) == astropy.units.rad
if radians:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"][::-1])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
dialog.add_expression("alpha", "Alpha", spherical[0], dataset)
dialog.add_expression("delta", "Delta", spherical[1], dataset)
dialog.add_text("x", "x", make_unique("x_aitoff", dataset))
dialog.add_text("y", "y", make_unique("y_aitoff", dataset))
values = dialog.get()
if values:
# pos = "pos.galactic" if galactic else "pos.eq"
# dataset.ucds[values["x"]] = "pos.cartesian.x;%s" % pos
# dataset.ucds[values["y"]] = "pos.cartesian.y;%s" % pos
# dataset.ucds[values["z"]] = "pos.cartesian.z;%s" % pos
alpha = values["alpha"]
if galactic: # go from 0-360 to -180-180
if values["degrees"] == "radians":
alpha = "((%s+pi) %% (2*pi) - pi)" % values["alpha"]
else:
alpha = "((%s+180) %% 360 - 180)" % values["alpha"]
dataset.add_virtual_columns_aitoff(alpha, values["delta"],
values["x"], values["y"], radians=values["degrees"] == "radians")
def add_proper_motion_eq2gal(parent, dataset, type="galactic"):
assert type == "galactic"
default_columns = dataset.ucd_find(["^pos.eq.ra", "^pos.eq.dec", "pos.pm;pos.eq.ra", "pos.pm;pos.eq.dec"])
column_names = dataset.get_column_names(virtual=True)
if default_columns is None:
default_columns = ["", "", "", ""]
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier and default_columns is not None:
values = dict(alpha=default_columns[0], delta=default_columns[1], pm_alpha=default_columns[2], pm_delta=default_columns[3], pm_alpha_out="pm_l", pm_delta_out="pm_b", degrees="degrees")
else:
dialog = QuickDialog(parent, title="Proper motion transform: equatorial to %s" % type)
# dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
# logger.debug("unit = %s", dataset.unit(column_names[0], default=astropy.units.deg))
# logger.debug("unit = %s", dataset.unit(column_names[0], default=astropy.units.deg) == astropy.units.rad)
radians = (dataset.unit(default_columns[0], default=astropy.units.deg) == astropy.units.rad)
if radians:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"][::-1])
else:
dialog.add_combo("degrees", "Input in", ["degrees", "radians"])
dialog.add_expression("alpha", "Right ascension", default_columns[0], dataset)
dialog.add_expression("delta", "Declination", default_columns[1], dataset)
if type == "galactic":
dialog.add_expression("pm_alpha", "pm_ra", default_columns[2], dataset)
dialog.add_expression("pm_delta", "pm_dec", default_columns[3], dataset)
dialog.add_text("pm_alpha_out", "pm_long name", "pm_l")
dialog.add_text("pm_delta_out", "pm_lat name", "pm_b")
else:
# dialog.add_text("l", "Ecliptic ra", "ra_lambda")
# dialog.add_text("b", "Ecliptic dec", "dec_beta")
pass
values = dialog.get()
if values:
dataset.ucds[values["pm_alpha_out"]] = "pos.pm;pos.galactic.lon" # % type
dataset.ucds[values["pm_delta_out"]] = "pos.pm;pos.galactic.lat" # % type
dataset.units[values["pm_alpha_out"]] = dataset.unit(values["pm_alpha"])
dataset.units[values["pm_delta_out"]] = dataset.unit(values["pm_delta"])
if type == "galactic":
dataset.add_virtual_columns_proper_motion_eq2gal(long_in=values["alpha"], lat_in=values["delta"],
pm_long=values["pm_alpha"], pm_lat=values["pm_delta"],
pm_long_out=values["pm_alpha_out"], pm_lat_out=values["pm_delta_out"],
radians=values["degrees"] == "radians")
else:
pass
# dataset.add_virtual_columns_eq2ecl(long_in=values["ra"], lat_in=values["dec"],
# long_out=values["l"], lat_out=values["b"],
# radians=values["degrees"] == "radians")
def main(argv=sys.argv):
dataset = vaex.open(argv[1])
app = QtGui.QApplication(argv)
table = ColumnsTable(None)
table.set_dataset(dataset)
table.show()
table.raise_()
sys.exit(app.exec_())
if __name__ == "__main__":
vaex.set_log_level_debug()
main()
# NOTE: the following fragment is unreachable dead code that references names
# (dataset, expr_matrix) which are not defined at this point; kept commented out:
# for i in range(3):
# 	for j in range(3):
# 		dataset.add_virtual_column("bla_%s%s" % (i, j), expr_matrix[i, j])
# dataset.add_virtual_columns_matrix3d("vx", "vy", "vz", "mu_alpha", "mu_delta", "vr", "bla")
|
|
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2021 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD license also (see ais.py) or omit ais.py
###############################################################################
import time
from avnav_util import *
from avnav_nmea import *
from avnav_worker import *
hasSerial=False
try:
import serial
import serial.tools.list_ports
hasSerial=True
except:
pass
import avnav_handlerList
#a reader class to read from a serial port using pyserial
#on windows use an int for the port - e.g. use 4 for COM5
#on linux use the device name for the port
#if no data is received within timeout *10 the port is closed and reopened
#this gives the chance to handle dynamically assigned ports with no issues
#this class is not directly a worker that can be instantiated from the config
#instead it is used by worker classes to handle serial input
#it also contains our internal converting routines
class SerialReader(object):
BAUDRATES=[460800,230400,115200,57600,38400,19200,9600,4800]
P_XONOFF=WorkerParameter('xonxoff', False,type=WorkerParameter.T_BOOLEAN)
P_RTSCTS=WorkerParameter('rtscts',False,type=WorkerParameter.T_BOOLEAN)
@classmethod
def getConfigParam(cls):
cfg=[
WorkerParameter('port',None,type=WorkerParameter.T_SELECT,rangeOrList=[]),
WorkerParameter('timeout', 2,type=WorkerParameter.T_FLOAT,
description="serial receive timeout in s, after 10*timeout port will be reopened"),
WorkerParameter('baud',4800,type=WorkerParameter.T_SELECT,rangeOrList=cls.BAUDRATES),
WorkerParameter('minbaud',0,type=WorkerParameter.T_SELECT,rangeOrList=cls.BAUDRATES+[0],
                      description='if this is set to anything other than 0, try autobauding between baud and minbaud'),
WorkerParameter('bytesize', 8,type=WorkerParameter.T_SELECT,rangeOrList=[5,6,7,8]),
WorkerParameter('parity','N',type=WorkerParameter.T_SELECT,rangeOrList=['N','E','O','M','S']),
WorkerParameter('stopbits', 1,type=WorkerParameter.T_SELECT,rangeOrList=[1,1.5,2]),
cls.P_XONOFF,
cls.P_RTSCTS,
WorkerParameter('numerrors',20,type=WorkerParameter.T_NUMBER,
description='reopen port after that many errors, set this to 0 to avoid any check for NMEA data'),
WorkerParameter('autobaudtime', 5,type=WorkerParameter.T_FLOAT,
description='use that many seconds to read data for autobauding if no newline is found'),
WorkerParameter('filter',"",type=WorkerParameter.T_FILTER)
]
return cfg
@classmethod
def listSerialPorts(cls):
if not hasSerial:
return []
ports=serial.tools.list_ports.comports()
rt=[]
for p in ports:
rt.append(p.device)
return rt
  #parameters:
  #param - the config dict (must contain at least 'port' and 'timeout')
  #writeData - a method used to write a received line (mandatory)
  #infoHandler - optional handler that receives status updates (can be None)
  #sourceName - the source name passed along with each written line
def __init__(self,param,writeData,infoHandler,sourceName):
for p in ('port','timeout'):
if param.get(p) is None:
raise Exception("missing "+p+" parameter for serial reader")
self.param=param
self.writeData=writeData
self.infoHandler=infoHandler
self.sourceName=sourceName
if self.writeData is None:
raise Exception("writeData has to be set")
self.startpattern=AVNUtil.getNMEACheck()
self.doStop=False
self.setInfo("created",WorkerStatus.INACTIVE)
self.device=None
def getName(self):
return "SerialReader-"+self.param['name']
def stopHandler(self):
self.doStop=True
try:
if self.device is not None:
self.device.close()
except Exception as e:
AVNLog.debug("unable to close serial device: %s",str(e))
# a simple approach for autobauding
# we try to read some data (~3 lines) and find a 0x0a in it
  # once we find this, we expect $ or ! and five capital letters afterwards
# if not found we try the next lower baudrate
def openDevice(self,baud,autobaud,init=False):
self.buffer=''
self.device=None
try:
pnum=int(self.param['port'])
except:
pnum=self.param['port']
bytesize=int(self.param['bytesize'])
parity=self.param['parity']
stopbits=int(self.param['stopbits'])
xonxoff=self.P_XONOFF.fromDict(self.param)
rtscts=self.P_RTSCTS.fromDict(self.param)
portname=self.param['port']
timeout=float(self.param['timeout'])
autobaudtime=float(self.param['autobaudtime'])
name=self.getName()
if init:
AVNLog.info("openDevice for port %s, baudrate=%d, timeout=%f, autobaud=%s",
portname,baud,timeout,"true" if autobaud else "false")
init=False
else:
AVNLog.debug("openDevice for port %s, baudrate=%d, timeout=%f , autobaud=%s",portname,baud,timeout,
"true" if autobaud else "false")
lastTime=time.time()
try:
self.setInfo("reader opening at %d baud"%(baud),WorkerStatus.STARTED)
self.device=serial.Serial(pnum, timeout=timeout, baudrate=baud, bytesize=bytesize, parity=parity, stopbits=stopbits, xonxoff=xonxoff, rtscts=rtscts)
self.setInfo("reader port open at %d baud"%baud,WorkerStatus.STARTED)
if autobaud:
starttime=time.time()
while time.time() <= (starttime + autobaudtime):
bytes=self.device.read(300)
if self.doStop:
try:
self.device.close()
except:
pass
return None
if len(bytes)==0:
#if there is no data at all we simply take all the time we have...
AVNLog.debug("unable to read data, retrying at %d",baud)
continue
data=bytes.decode('ascii','ignore')
curoffset=0
while curoffset < (len(data)-5):
pos=data.find('\n',curoffset)
curoffset+=1
if pos < 0:
AVNLog.debug("no newline at baud %d in %s",baud,data)
break
curoffset=pos+1
match=self.startpattern.search(data,curoffset)
if not match:
continue
AVNLog.debug("assumed startpattern %s at baud %d in %s",match.group(0),baud,data)
AVNLog.info("autobaud successfully finished at baud %d",baud)
self.setInfo("reader receiving at %d baud"%(baud),WorkerStatus.STARTED)
return self.device
self.device.close()
return None
#hmm - seems that we have not been able to autobaud - return anyway
return self.device
except Exception:
self.setInfo("unable to open port",WorkerStatus.ERROR)
try:
tf=traceback.format_exc(3)
except:
tf="unable to decode exception"
AVNLog.debug("Exception on opening %s : %s",portname,tf)
if self.device is not None:
try:
self.device.close()
except:
pass
self.device=None
return self.device
def readLine(self,serialDevice,timeout):
#if not os.name=='posix':
return serialDevice.readline(300)
#the run method - just try forever
def run(self):
threading.current_thread().setName("%s"%self.getName())
self.device=None
init=True
isOpen=False
AVNLog.debug("started with param %s",",".join(str(i)+"="+str(self.param[i]) for i in list(self.param.keys())))
self.setInfo("created",WorkerStatus.STARTED)
filterstr=self.param.get('filter')
filter=None
if filterstr != "":
filter=filterstr.split(',')
try:
while not self.doStop:
name=self.getName()
timeout=float(self.param['timeout'])
portname=self.param['port']
porttimeout=timeout*10
baud=int(self.param['baud'])
maxerrors=int(self.param['numerrors'])
minbaud=int(self.param.get('minbaud') or baud)
rates=self.BAUDRATES
autobaud=False
if minbaud != baud and minbaud != 0:
autobaud=True
if not baud in rates or not minbaud in rates:
AVNLog.debug("minbaud/baud not in allowed rates %s","".join(str(f) for f in rates))
autobaud=False
if minbaud >= baud:
AVNLog.debug("minbaud >= baud")
autobaud=False
if autobaud:
baudidx=0
while rates[baudidx] > baud:
baudidx+=1
while baudidx < len(rates) and rates[baudidx] >= minbaud and not self.doStop:
f=self.openDevice(rates[baudidx],True,init)
init=False
baudidx+=1
if not f is None:
break
else:
self.openDevice(baud,False,init)
init=False
if self.doStop:
AVNLog.info("handler stopped, leaving")
self.setInfo("reader stopped for %s"%portname,WorkerStatus.INACTIVE)
try:
self.device.close()
except:
pass
break
if self.device is None:
time.sleep(min(porttimeout/2,5))
continue
AVNLog.debug("%s opened, start receiving data",self.device.name)
lastTime=time.time()
numerrors=0
hasNMEA=False
MAXLEN=500
buffer=b''
while not self.doStop:
bytes=b''
try:
bytes=self.readLine(self.device,timeout)
if len(buffer) > 0:
bytes=buffer+bytes
              buffer=b''
if len(bytes) > 0 and bytes.find(b"\n") <0:
if len(bytes) < MAXLEN:
continue
raise Exception("no newline in serial data")
except Exception as e:
AVNLog.debug("Exception %s in serial read, close and reopen %s",traceback.format_exc(),portname)
try:
self.device.close()
isOpen=False
except:
pass
break
if not bytes is None and len(bytes)> 0:
if not hasNMEA:
self.setInfo("reader receiving %s at %d baud"%(portname,self.device.baudrate),WorkerStatus.STARTED)
if not isOpen:
AVNLog.info("successfully opened %s",self.device.name)
isOpen=True
self.status=True
data=bytes.decode('ascii','ignore').translate(NMEAParser.STRIPCHARS)
if maxerrors > 0 or not hasNMEA:
if not self.startpattern.match(data):
if maxerrors>0:
numerrors+=1
if numerrors > maxerrors:
#hmm - seems that we do not see any NMEA data
AVNLog.debug("did not see any NMEA data for %d lines - close and reopen",maxerrors)
try:
self.device.close()
except:
pass
break
continue
else:
pass
if len(data) < 5:
AVNLog.debug("ignore short data %s",data)
else:
numerrors=0
lastTime=time.time()
if not NMEAParser.checkFilter(data,filter):
continue
if not hasNMEA:
self.setInfo("reader receiving NMEA %s at %d baud"%(portname,self.device.baudrate),WorkerStatus.NMEA)
hasNMEA=True
if not self.writeData is None:
self.writeData(data,source=self.sourceName)
else:
AVNLog.debug("unable to write data")
if (time.time() - lastTime) > porttimeout:
self.setInfo("timeout",WorkerStatus.ERROR)
self.device.close()
self.device=None
if isOpen:
AVNLog.info("reopen port %s - timeout elapsed",portname)
isOpen=False
else:
AVNLog.debug("reopen port %s - timeout elapsed",portname)
break
except:
AVNLog.info("exception in receiver %s"%traceback.format_exc())
AVNLog.info("stopping handler")
self.setInfo("stopped",WorkerStatus.INACTIVE)
self.deleteInfo()
def setInfo(self,txt,status):
if not self.infoHandler is None:
self.infoHandler.setInfo('main',"%s:%s"%(self.param['port'],txt),status)
def deleteInfo(self):
if not self.infoHandler is None:
self.infoHandler.deleteInfo('main')
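# Illustrative sketch (not part of the original module): driving SerialReader
# directly with a hand-built parameter dict and a simple print callback. Real
# workers pass their own config dict and a feeder's addNMEA method instead;
# the port name and the other values below are assumptions for demonstration.
def _exampleSerialReaderUsage():
  import threading
  param={'name': 'example', 'port': '/dev/ttyUSB0', 'timeout': 2, 'baud': 4800,
         'minbaud': 0, 'bytesize': 8, 'parity': 'N', 'stopbits': 1,
         'xonxoff': False, 'rtscts': False, 'numerrors': 20, 'autobaudtime': 5,
         'filter': ''}
  def writeLine(data, source=None):
    print("received from %s: %s" % (source, data.strip()))
  reader=SerialReader(param, writeLine, None, 'example-source')
  readerThread=threading.Thread(target=reader.run, name=reader.getName())
  readerThread.start()
  # later, e.g. on shutdown:
  # reader.stopHandler(); readerThread.join()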
#a Worker to directly read from a serial line using pyserial
#on windows use an int for the port - e.g. use 4 for COM5
#on linux use the device name for the port
#if no data is received within timeout *10 the port is closed and reopened
#this gives the chance to handle dynamically assigned ports with no issues
#if useFeeder is set, pipe the received data through our feeder
#this gives the chance to output them at NMEA output interfaces
class AVNSerialReader(AVNWorker):
@classmethod
def getConfigName(cls):
return "AVNSerialReader"
@classmethod
def getConfigParam(cls, child=None):
if not child is None:
return None
cfg=SerialReader.getConfigParam()
ownCfg=[
WorkerParameter('feederName','',type=WorkerParameter.T_STRING,
                      description='if this is set, we do not use the default feeder but this one',
editable=False)
]
return cfg+ownCfg
@classmethod
def createInstance(cls, cfgparam):
if not hasSerial:
AVNLog.warn("serial readers configured but serial module not available, ignore them")
return None
rt=AVNSerialReader(cfgparam)
return rt
@classmethod
def getEditableParameters(cls, makeCopy=True,id=None):
rt= super().getEditableParameters(True,id=id)
slist=SerialReader.listSerialPorts()
slist=UsedResource.filterListByUsed(UsedResource.T_SERIAL,slist,
cls.findUsersOf(UsedResource.T_SERIAL,ownId=id))
WorkerParameter.updateParamFor(rt, 'port', {'rangeOrList':slist})
return rt
@classmethod
def canEdit(cls):
return True
@classmethod
def canDeleteHandler(cls):
return True
def __init__(self,param):
for p in ('port','timeout'):
if param.get(p) is None:
raise Exception("missing "+p+" parameter for serial reader")
self.writeData=None
self.reader=None
AVNWorker.__init__(self, param)
#make some checks when we have to start
  #we cannot do this on init as we potentially have to find the feeder...
def startInstance(self,navdata):
feedername=self.getStringParam('feederName')
feeder=self.findFeeder(feedername)
if feeder is None:
raise Exception("%s: cannot find a suitable feeder (name %s)",self.getName(),feedername or "")
self.writeData=feeder.addNMEA
super().startInstance(navdata)
def checkConfig(self, param):
if 'port' in param:
self.checkUsedResource(UsedResource.T_SERIAL,param.get('port'))
#thread run method - just try forever
def run(self):
while not self.shouldStop():
self.freeAllUsedResources()
self.claimUsedResource(UsedResource.T_SERIAL,self.getParamValue('port'))
try:
self.reader=SerialReader(self.param, self.writeData,self,self.getSourceName(self.getParamValue('port')))
self.reader.run()
except Exception as e:
AVNLog.error("exception in serial reader %s",traceback.format_exc())
def updateConfig(self, param,child=None):
if 'port' in param:
self.checkUsedResource(UsedResource.T_SERIAL,param['port'])
super().updateConfig(param)
self.reader.stopHandler()
def stop(self):
super().stop()
try:
self.reader.stopHandler()
except:
pass
avnav_handlerList.registerHandler(AVNSerialReader)
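# Configuration note (added for illustration, not from the original source):
# with the handler registered above, a serial reader is normally enabled via
# the server's XML configuration; the exact element below is an assumption
# derived from the parameter names in SerialReader.getConfigParam(), e.g.
#   <AVNSerialReader port="/dev/ttyUSB0" baud="4800" minbaud="0" timeout="2"/>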
|
|
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find warnings for C++ code.
TODO(nnorwitz): provide a mechanism to configure which warnings should
be generated and which should be suppressed. Currently, all possible
warnings will always be displayed. There is no way to suppress any.
There also needs to be a way to use annotations in the source code to
suppress warnings.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
from . import ast
from . import headers
from . import keywords
from . import metrics
from . import symbols
from . import tokenize
from . import utils
try:
basestring
except NameError:
basestring = str
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# The filename extension used for the primary header file associated w/.cc
# file.
PRIMARY_HEADER_EXTENSION = '.h'
HEADER_EXTENSIONS = frozenset(['.h', '.hh', '.hpp', '.h++', '.hxx', '.cuh'])
CPP_EXTENSIONS = frozenset(['.cc', '.cpp', '.c++', '.cxx', '.cu'])
# These enumerations are used to determine how a symbol/#include file is used.
UNUSED = 0
USES_REFERENCE = 1
USES_DECLARATION = 2
DECLARATION_TYPES = (ast.Class, ast.Struct, ast.Enum, ast.Union)
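# Minimal sketch (not from the original module) of how the use constants above
# combine: they act as bit flags, so a header seen both by reference and by
# full declaration keeps both bits, while the "unnecessary #include" check
# later only tests the USES_DECLARATION bit.
def _example_use_flags():
    use = UNUSED
    use |= USES_REFERENCE    # only a pointer/reference to the symbol was seen
    use |= USES_DECLARATION  # the full definition turned out to be needed
    return bool(use & USES_DECLARATION)  # True -> the #include is justified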
class Module(object):
"""Data container represting a single source file."""
def __init__(self, filename, ast_list):
self.filename = filename
self.normalized_filename = os.path.abspath(filename)
self.ast_list = ast_list
self.public_symbols = self._get_exported_symbols()
def _get_exported_symbols(self):
if not self.ast_list:
return {}
return dict([(n.name, n) for n in self.ast_list if n.is_exportable()])
def is_header_file(filename):
_, ext = os.path.splitext(filename)
return ext.lower() in HEADER_EXTENSIONS
def is_cpp_file(filename):
_, ext = os.path.splitext(filename)
return ext.lower() in CPP_EXTENSIONS
class WarningHunter(object):
def __init__(self, filename, source, ast_list, include_paths, quiet=False):
self.filename = filename
self.normalized_filename = os.path.abspath(filename)
self.source = source
self.ast_list = ast_list
self.include_paths = include_paths[:]
self.quiet = quiet
self.symbol_table = symbols.SymbolTable()
self.metrics = metrics.Metrics(source)
self.warnings = set()
def _add_warning(self, msg, node, filename=None):
if filename is not None:
contents = utils.read_file(filename)
if contents is None:
return
src_metrics = metrics.Metrics(contents)
else:
filename = self.filename
src_metrics = self.metrics
line_number = get_line_number(src_metrics, node)
self.warnings.add((filename, line_number, msg))
def show_warnings(self):
for filename, line_num, msg in sorted(self.warnings):
if line_num == 0:
print('{}: {}'.format(filename, msg))
else:
print('{}:{}: {}'.format(filename, line_num, msg))
def find_warnings(self):
if is_header_file(self.filename):
self._find_header_warnings()
elif is_cpp_file(self.filename):
self._find_source_warnings()
def _update_symbol_table(self, module):
for name, node in module.public_symbols.items():
self.symbol_table.add_symbol(name, node.namespace, node, module)
def _get_module(self, node):
filename = node.filename
(source, filename) = headers.read_source(
filename,
include_paths=[os.path.dirname(self.filename)] + self.include_paths
)
if source is None:
module = Module(filename, None)
msg = "unable to find '{}'".format(filename)
self._add_warning(msg, node)
else:
try:
builder = ast.builder_from_source(source, filename,
quiet=self.quiet)
module = Module(filename,
[_f for _f in builder.generate() if _f])
except (ast.ParseError,
tokenize.TokenError) as error:
if not self.quiet:
print(
"Exception while processing '{}': {}".format(
filename,
error),
file=sys.stderr)
module = Module(filename, None)
else:
self._update_symbol_table(module)
return module
def _read_and_parse_includes(self):
# Map header-filename: (#include AST node, module).
included_files = {}
# Map declaration-name: AST node.
forward_declarations = {}
for node in self.ast_list:
# Ignore #include <> files. Only handle #include "".
# Assume that <> are used for only basic C/C++ headers.
if isinstance(node, ast.Include) and not node.system:
module = self._get_module(node)
included_files[module.normalized_filename] = node, module
if isinstance(node, DECLARATION_TYPES) and node.is_declaration():
forward_declarations[node.full_name()] = node
return included_files, forward_declarations
def _verify_includes(self):
"""Read and parse all the #include'd files and warn about really stupid
things that can be determined from the #include'd file name."""
files_seen = {}
for node in self.ast_list:
# Ignore #include <> files. Only handle #include "".
# Assume that <> are used for only basic C/C++ headers.
if isinstance(node, ast.Include) and not node.system:
module = self._get_module(node)
filename = module.normalized_filename
normalized_filename = module.normalized_filename
if is_cpp_file(filename):
self._add_warning(
"should not #include C++ source file '{}'".format(
node.filename),
node)
if normalized_filename == self.normalized_filename:
self._add_warning(
"'{}' #includes itself".format(node.filename),
node)
if normalized_filename in files_seen:
include_node = files_seen[normalized_filename]
line_num = get_line_number(self.metrics, include_node)
self._add_warning(
"'{}' already #included on line {}".format(
node.filename,
line_num),
node)
files_seen[normalized_filename] = node
def _verify_include_files_used(self, file_uses, included_files):
"""Find all #include files that are unnecessary."""
for include_file, use in file_uses.items():
if not use & USES_DECLARATION:
node, module = included_files[include_file]
if module.ast_list is not None:
msg = "'{}' does not need to be #included".format(
node.filename)
if use & USES_REFERENCE:
msg += '; use a forward declaration instead'
self._add_warning(msg, node)
def _verify_forward_declarations_used(self, forward_declarations,
decl_uses, file_uses):
"""Find all the forward declarations that are not used."""
for cls in forward_declarations:
if cls in file_uses:
if not decl_uses[cls] & USES_DECLARATION:
node = forward_declarations[cls]
msg = ("'{}' forward declared, "
'but needs to be #included'.format(cls))
self._add_warning(msg, node)
else:
if decl_uses[cls] == UNUSED:
node = forward_declarations[cls]
msg = "'{}' not used".format(cls)
self._add_warning(msg, node)
def _determine_uses(self, included_files, forward_declarations):
"""Set up the use type of each symbol."""
file_uses = dict.fromkeys(included_files, UNUSED)
decl_uses = dict.fromkeys(forward_declarations, UNUSED)
symbol_table = self.symbol_table
def _add_reference(name, namespace):
if name in decl_uses:
decl_uses[name] |= USES_REFERENCE
else:
nss = ''
for ns in namespace:
if ns is None:
continue
nss += ns + '::'
if nss + name in decl_uses:
decl_uses[nss + name] |= USES_REFERENCE
return
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].normalized_filename
if name in file_uses:
if isinstance(file_use_node[0], ast.Typedef):
file_uses[name] |= USES_DECLARATION
else:
file_uses[name] |= USES_REFERENCE
def _add_use(name, namespace):
if isinstance(name, list):
# name contains a list of tokens.
name = '::'.join([n.name for n in name])
elif not isinstance(name, basestring):
# Happens when variables are defined with inlined types, e.g.:
# enum {...} variable;
return
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
# TODO(nnorwitz): symbols from the current module
# should be added to the symbol table and then this
# exception should not happen...unless the code relies
# on another header for proper compilation.
# Store the use since we might really need to #include it.
if namespace and None not in namespace and '::' not in name:
name = '::'.join(namespace) + '::' + name
file_uses[name] = file_uses.get(name, 0) | USES_DECLARATION
return
# TODO(nnorwitz): do proper check for ref/pointer/symbol.
name = file_use_node[1].normalized_filename
if name in file_uses:
file_uses[name] |= USES_DECLARATION
def _add_variable(node, namespace):
if node.reference or node.pointer:
_add_reference(node.name, namespace)
else:
_add_use(node.name, namespace)
# This needs to recurse when the node is a templated type.
_add_template_use(node.name,
node.templated_types,
namespace)
def _process_function(function):
if function.return_type:
return_type = function.return_type
_add_variable(return_type,
function.namespace)
templated_types = function.templated_types or ()
for p in function.parameters:
if p.type.name not in templated_types:
if function.body and p.name and p.type.name:
                        # Assume that if the function has a body and a name
# the parameter type is really used.
# NOTE(nnorwitz): this is over-aggressive. It would be
# better to iterate through the body and determine
# actual uses based on local vars and data members
# used.
_add_use(p.type.name, function.namespace)
else:
_add_variable(p.type, function.namespace)
def _process_function_body(function, namespace):
iterator = iter(function.body)
for t in iterator:
if t.token_type == tokenize.NAME:
if not keywords.is_keyword(t.name):
# TODO(nnorwitz): handle :: names.
# TODO(nnorwitz): handle static function calls.
# TODO(nnorwitz): handle using statements in file.
# TODO(nnorwitz): handle using statements in function.
# TODO(nnorwitz): handle namespace assignment in file.
_add_use(t.name, namespace)
elif t.name in ('.', '->'):
# Skip tokens after a dereference.
next(iterator)
def _add_template_use(name, types, namespace):
if types:
for cls in types:
if name.endswith('_ptr') or cls.pointer:
# Special case templated classes that end w/_ptr.
# These are things like auto_ptr which do
# not require the class definition, only decl.
_add_reference(cls.name, namespace)
else:
_add_use(cls.name, namespace)
_add_template_use(cls.name, cls.templated_types, namespace)
# Iterate through the source AST/tokens, marking each symbols use.
ast_seq = [self.ast_list]
while ast_seq:
for node in ast_seq.pop():
if isinstance(node, ast.VariableDeclaration):
_add_variable(node.type, node.namespace)
elif isinstance(node, ast.Function):
_process_function(node)
if node.body:
_process_function_body(node, node.namespace)
elif isinstance(node, ast.Typedef):
alias = node.alias
if isinstance(alias, ast.Type):
if alias.reference or alias.pointer:
_add_reference(alias.name, node.namespace)
else:
_add_use(alias.name, node.namespace)
_add_template_use('<typedef>', alias.templated_types,
node.namespace)
elif isinstance(node, ast.Friend):
if node.expr and node.expr[0].name == 'class':
name = ''.join([n.name for n in node.expr[1:]])
_add_reference(name, node.namespace)
elif isinstance(node, ast.Class) and node.body is not None:
if node.body:
ast_seq.append(node.body)
_add_template_use('', node.bases, node.namespace)
elif isinstance(node, ast.Union) and node.fields:
pass # TODO(nnorwitz): impl
return file_uses, decl_uses
def _find_unused_warnings(self):
included_files, forward_declarations = self._read_and_parse_includes()
file_uses, decl_uses = self._determine_uses(included_files,
forward_declarations)
self._verify_includes()
self._verify_include_files_used(file_uses, included_files)
self._verify_forward_declarations_used(forward_declarations, decl_uses,
file_uses)
def _find_header_warnings(self):
self._find_unused_warnings()
# TODO(nnorwitz): other warnings to add:
# * too much non-template impl in header file
# * too many methods/data members
        # * missing include for classes used for inheritance
def _find_public_function_warnings(self, node, name, primary_header,
all_headers):
# Not found in the primary header, search all other headers.
for _, header in all_headers.values():
if name in header.public_symbols:
# If the primary.filename == header.filename, it probably
# indicates an error elsewhere. It sucks to mask it,
# but false positives are worse.
if primary_header:
msg = ("expected to find '{}' in '{}', "
"but found in '{}'".format(name,
primary_header.filename,
header.filename))
self._add_warning(msg, node)
break
else:
where = 'in any directly #included header'
if primary_header:
where = (
"in expected header '{}'"
' or any other directly #included header'.format(
primary_header.filename))
if name != 'main':
self._add_warning("'{}' not found {}".format(name, where),
node)
def _check_public_functions(self, primary_header, all_headers):
"""Verify all the public functions are also declared in a header
file."""
public_symbols = {}
declared_only_symbols = {}
if primary_header:
for name, symbol in primary_header.public_symbols.items():
if isinstance(symbol, ast.Function):
public_symbols[name] = symbol
declared_only_symbols = dict.fromkeys(public_symbols, True)
for node in self.ast_list:
# Make sure we have a function that should be exported.
if not isinstance(node, ast.Function):
continue
if isinstance(node, ast.Method):
# Ensure that for Foo::Bar, Foo is *not* a namespace.
# If Foo is a namespace, we have a function and not a method.
names = [n.name for n in node.in_class]
if names != self.symbol_table.get_namespace(names):
continue
if not (node.is_definition() and node.is_exportable()):
continue
# This function should be declared in a header file.
name = node.name
if name in public_symbols:
declared_only_symbols[name] = False
else:
self._find_public_function_warnings(node,
name,
primary_header,
all_headers)
for name, declared_only in declared_only_symbols.items():
if declared_only:
node = public_symbols[name]
if not node.templated_types:
msg = "'{}' declared but not defined".format(name)
self._add_warning(msg, node, primary_header.filename)
def _get_primary_header(self, included_files):
basename = os.path.splitext(self.normalized_filename)[0]
primary_header = included_files.get(
basename + PRIMARY_HEADER_EXTENSION)
if not primary_header:
primary_header = included_files.get(basename)
if primary_header:
return primary_header[1]
return None
def _find_source_warnings(self):
included_files, forward_declarations = self._read_and_parse_includes()
for node in forward_declarations.values():
# TODO(nnorwitz): This really isn't a problem, but might
# be something to warn against. I expect this will either
# be configurable or removed in the future. But it's easy
# to check for now.
msg = (
"'{}' forward declaration not expected in source file".format(
node.name))
self._add_warning(msg, node)
# A primary header is optional. However, when looking up
# defined methods in the source, always look in the
# primary_header first. Expect that is the most likely location.
# Use of primary_header is primarily an optimization.
primary_header = self._get_primary_header(included_files)
if not primary_header and not any(node for node in self.ast_list
if isinstance(node, ast.Function) and
node.name == 'main'):
msg = 'unable to find header file with matching name'
self.warnings.add((self.filename, 0, msg))
self._check_public_functions(primary_header, included_files)
if primary_header and primary_header.ast_list is not None:
includes = [
node.filename
for node in primary_header.ast_list
if isinstance(node, ast.Include)
]
for (node, _) in included_files.values():
if node.filename in includes:
msg = "'{}' already #included in '{}'".format(
node.filename, primary_header.filename)
self._add_warning(msg, node)
# TODO(nnorwitz): other warnings to add:
# * unused forward decls for variables (globals)/classes
# * Functions that are too large/complex
# * Variables declared far from first use
# * primitive member variables not initialized in ctor
def get_line_number(metrics_instance, node):
return metrics_instance.get_line_number(node.start)
def run(filename, source, entire_ast, include_paths, quiet):
hunter = WarningHunter(filename, source, entire_ast,
include_paths=include_paths,
quiet=quiet)
hunter.find_warnings()
hunter.show_warnings()
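# Illustrative driver sketch (not part of this module): building the AST for a
# single file and handing it to run() above. The helper name and the defaults
# are assumptions; the real command-line front end lives elsewhere.
def _example_run_on_file(filename, include_paths=('.',), quiet=True):
    source = utils.read_file(filename)
    if source is None:
        return
    builder = ast.builder_from_source(source, filename, quiet=quiet)
    entire_ast = [node for node in builder.generate() if node]
    run(filename, source, entire_ast, list(include_paths), quiet)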
|