repo_name | ref | path | copies | content
---|---|---|---|---|
hopshadoop/hops-tensorflow | refs/heads/master | yarntf/examples/slim/nets/nets_factory.py | 6 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from nets import alexnet
from nets import cifarnet
from nets import inception
from nets import lenet
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import vgg
slim = tf.contrib.slim
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
}
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2':
inception.inception_resnet_v2_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
logits, end_points = network_fn(images)
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
raise ValueError('Unknown network name: %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes, is_training=is_training)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
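# Example usage (a sketch, assuming the slim `nets` package is importable and
# `images` is a float tensor of shape [batch, size, size, 3]):
#
#   network_fn = get_network_fn('vgg_16', num_classes=1000,
#                               weight_decay=0.0005, is_training=True)
#   size = network_fn.default_image_size
#   logits, end_points = network_fn(images)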
|
emon10005/sympy | refs/heads/master | sympy/simplify/hyperexpand.py | 62 | """
Expand Hypergeometric (and Meijer G) functions into named
special functions.
The algorithm for doing this uses a collection of lookup tables of
hypergeometric functions, and various of their properties, to expand
many hypergeometric functions in terms of special functions.
It is based on the following paper:
Kelly B. Roach. Meijer G Function Representations.
In: Proceedings of the 1997 International Symposium on Symbolic and
Algebraic Computation, pages 205-211, New York, 1997. ACM.
It is described in great(er) detail in the Sphinx documentation.
"""
# SUMMARY OF EXTENSIONS FOR MEIJER G FUNCTIONS
#
# o z**rho G(ap, bq; z) = G(ap + rho, bq + rho; z)
#
# o denote z*d/dz by D
#
# o It is helpful to keep in mind that ap and bq play essentially symmetric
# roles: G(1/z) has slightly altered parameters, with ap and bq interchanged.
#
# o There are four shift operators:
# A_J = b_J - D, J = 1, ..., n
# B_J = 1 - a_J + D, J = 1, ..., m
# C_J = -b_J + D, J = m+1, ..., q
# D_J = a_J - 1 - D, J = n+1, ..., p
#
# A_J, C_J increment b_J
# B_J, D_J decrement a_J
#
# o The corresponding four inverse-shift operators are defined if there
# is no cancellation. Thus e.g. an index a_J (upper or lower) can be
# incremented if a_J != b_i for i = 1, ..., q.
#
# o Order reduction: if b_j - a_i is a non-negative integer, where
# j <= m and i > n, the corresponding quotient of gamma functions reduces
# to a polynomial. Hence the G function can be expressed using a G-function
# of lower order.
# Similarly if j > m and i <= n.
#
# Secondly, there are paired index theorems [Adamchik, The evaluation of
# integrals of Bessel functions via G-function identities]. Suppose there
# are three parameters a, b, c, where a is an a_i, i <= n, b is a b_j,
# j <= m and c is a denominator parameter (i.e. a_i, i > n or b_j, j > m).
# Suppose further all three differ by integers.
# Then the order can be reduced.
# TODO work this out in detail.
#
# o An index quadruple is called suitable if its order cannot be reduced.
# If there exists a sequence of shift operators transforming one index
# quadruple into another, we say one is reachable from the other.
#
# o Deciding if one index quadruple is reachable from another is tricky. For
# this reason, we use hand-built routines to match and instantiate formulas.
#
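# o A concrete instance of the first identity (illustration only): with
#   empty ap and bq = (0), G(0; z) = exp(-z) is a standard special case,
#   and shifting by rho = 1 gives z**1 * G(0; z) = G(1; z) = z*exp(-z).
#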
from __future__ import print_function, division
from collections import defaultdict
from itertools import product
from sympy import SYMPY_DEBUG
from sympy.core import (S, Dummy, symbols, sympify, Tuple, expand, I, pi, Mul,
EulerGamma, oo, zoo, expand_func, Add, nan, Expr)
from sympy.core.mod import Mod
from sympy.core.compatibility import default_sort_key, range
from sympy.utilities.iterables import sift
from sympy.functions import (exp, sqrt, root, log, lowergamma, cos,
besseli, gamma, uppergamma, expint, erf, sin, besselj, Ei, Ci, Si, Shi,
sinh, cosh, Chi, fresnels, fresnelc, polar_lift, exp_polar, floor, ceiling,
rf, factorial, lerchphi, Piecewise, re, elliptic_k, elliptic_e)
from sympy.functions.special.hyper import (hyper, HyperRep_atanh,
HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1,
HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2,
HyperRep_cosasin, HyperRep_sinasin, meijerg)
from sympy.simplify import simplify
from sympy.functions.elementary.complexes import polarify, unpolarify
from sympy.simplify.powsimp import powdenest
from sympy.polys import poly, Poly
from sympy.series import residue
# function to define "buckets"
def _mod1(x):
# TODO see if this can work as Mod(x, 1); this will require
# different handling of the "buckets" since these need to
# be sorted and that fails when there is a mixture of
# integers and expressions with parameters. With the current
# Mod behavior, Mod(k, 1) == Mod(1, 1) == 0 if k is an integer.
# Although the sorting can be done with Basic.compare, this may
# still require different handling of the sorted buckets.
if x.is_Number:
return Mod(x, 1)
c, x = x.as_coeff_Add()
return Mod(c, 1) + x
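# Illustrative values (a sketch; here `a` stands for a plain Symbol):
#   _mod1(S(7)/2)     -> 1/2        (numbers are reduced mod 1)
#   _mod1(a + S(3)/2) -> a + 1/2    (only the rational coefficient is reduced)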
# leave add formulae at the top for easy reference
def add_formulae(formulae):
""" Create our knowledge base. """
from sympy.matrices import Matrix
a, b, c, z = symbols('a b c, z', cls=Dummy)
def add(ap, bq, res):
func = Hyper_Function(ap, bq)
formulae.append(Formula(func, z, res, (a, b, c)))
def addb(ap, bq, B, C, M):
func = Hyper_Function(ap, bq)
formulae.append(Formula(func, z, None, (a, b, c), B, C, M))
# Luke, Y. L. (1969), The Special Functions and Their Approximations,
# Volume 1, section 6.2
# 0F0
add((), (), exp(z))
# 1F0
add((a, ), (), HyperRep_power1(-a, z))
# 2F1
addb((a, a - S.Half), (2*a, ),
Matrix([HyperRep_power2(a, z),
HyperRep_power2(a + S(1)/2, z)/2]),
Matrix([[1, 0]]),
Matrix([[(a - S.Half)*z/(1 - z), (S.Half - a)*z/(1 - z)],
[a/(1 - z), a*(z - 2)/(1 - z)]]))
addb((1, 1), (2, ),
Matrix([HyperRep_log1(z), 1]), Matrix([[-1/z, 0]]),
Matrix([[0, z/(z - 1)], [0, 0]]))
addb((S.Half, 1), (S('3/2'), ),
Matrix([HyperRep_atanh(z), 1]),
Matrix([[1, 0]]),
Matrix([[-S(1)/2, 1/(1 - z)/2], [0, 0]]))
addb((S.Half, S.Half), (S('3/2'), ),
Matrix([HyperRep_asin1(z), HyperRep_power1(-S(1)/2, z)]),
Matrix([[1, 0]]),
Matrix([[-S(1)/2, S(1)/2], [0, z/(1 - z)/2]]))
addb((a, S.Half + a), (S.Half, ),
Matrix([HyperRep_sqrts1(-a, z), -HyperRep_sqrts2(-a - S(1)/2, z)]),
Matrix([[1, 0]]),
Matrix([[0, -a],
[z*(-2*a - 1)/2/(1 - z), S.Half - z*(-2*a - 1)/(1 - z)]]))
# A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
# Integrals and Series: More Special Functions, Vol. 3.
# Gordon and Breach Science Publisher
addb([a, -a], [S.Half],
Matrix([HyperRep_cosasin(a, z), HyperRep_sinasin(a, z)]),
Matrix([[1, 0]]),
Matrix([[0, -a], [a*z/(1 - z), 1/(1 - z)/2]]))
addb([1, 1], [3*S.Half],
Matrix([HyperRep_asin2(z), 1]), Matrix([[1, 0]]),
Matrix([[(z - S.Half)/(1 - z), 1/(1 - z)/2], [0, 0]]))
# Complete elliptic integrals K(z) and E(z), both a 2F1 function
addb([S.Half, S.Half], [S.One],
Matrix([elliptic_k(z), elliptic_e(z)]),
Matrix([[2/pi, 0]]),
Matrix([[-S.Half, -1/(2*z-2)],
[-S.Half, S.Half]]))
addb([-S.Half, S.Half], [S.One],
Matrix([elliptic_k(z), elliptic_e(z)]),
Matrix([[0, 2/pi]]),
Matrix([[-S.Half, -1/(2*z-2)],
[-S.Half, S.Half]]))
# 3F2
addb([-S.Half, 1, 1], [S.Half, 2],
Matrix([z*HyperRep_atanh(z), HyperRep_log1(z), 1]),
Matrix([[-S(2)/3, -S(1)/(3*z), S(2)/3]]),
Matrix([[S(1)/2, 0, z/(1 - z)/2],
[0, 0, z/(z - 1)],
[0, 0, 0]]))
# actually the formula for 3/2 is much nicer ...
addb([-S.Half, 1, 1], [2, 2],
Matrix([HyperRep_power1(S(1)/2, z), HyperRep_log2(z), 1]),
Matrix([[S(4)/9 - 16/(9*z), 4/(3*z), 16/(9*z)]]),
Matrix([[z/2/(z - 1), 0, 0], [1/(2*(z - 1)), 0, S.Half], [0, 0, 0]]))
# 1F1
addb([1], [b], Matrix([z**(1 - b) * exp(z) * lowergamma(b - 1, z), 1]),
Matrix([[b - 1, 0]]), Matrix([[1 - b + z, 1], [0, 0]]))
addb([a], [2*a],
Matrix([z**(S.Half - a)*exp(z/2)*besseli(a - S.Half, z/2)
* gamma(a + S.Half)/4**(S.Half - a),
z**(S.Half - a)*exp(z/2)*besseli(a + S.Half, z/2)
* gamma(a + S.Half)/4**(S.Half - a)]),
Matrix([[1, 0]]),
Matrix([[z/2, z/2], [z/2, (z/2 - 2*a)]]))
mz = polar_lift(-1)*z
addb([a], [a + 1],
Matrix([mz**(-a)*a*lowergamma(a, mz), a*exp(z)]),
Matrix([[1, 0]]),
Matrix([[-a, 1], [0, z]]))
# This one is redundant.
add([-S.Half], [S.Half], exp(z) - sqrt(pi*z)*(-I)*erf(I*sqrt(z)))
# Added to get nice results for Laplace transform of Fresnel functions
# http://functions.wolfram.com/07.22.03.6437.01
# Basic rule
#add([1], [S(3)/4, S(5)/4],
# sqrt(pi) * (cos(2*sqrt(polar_lift(-1)*z))*fresnelc(2*root(polar_lift(-1)*z,4)/sqrt(pi)) +
# sin(2*sqrt(polar_lift(-1)*z))*fresnels(2*root(polar_lift(-1)*z,4)/sqrt(pi)))
# / (2*root(polar_lift(-1)*z,4)))
# Manually tuned rule
addb([1], [S(3)/4, S(5)/4],
Matrix([ sqrt(pi)*(I*sinh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
+ cosh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)))
* exp(-I*pi/4)/(2*root(z, 4)),
sqrt(pi)*root(z, 4)*(sinh(2*sqrt(z))*fresnelc(2*root(z, 4)*exp(I*pi/4)/sqrt(pi))
+ I*cosh(2*sqrt(z))*fresnels(2*root(z, 4)*exp(I*pi/4)/sqrt(pi)))
*exp(-I*pi/4)/2,
1 ]),
Matrix([[1, 0, 0]]),
Matrix([[-S(1)/4, 1, S(1)/4],
[ z, S(1)/4, 0 ],
[ 0, 0, 0 ]]))
# 2F2
addb([S.Half, a], [S(3)/2, a + 1],
Matrix([a/(2*a - 1)*(-I)*sqrt(pi/z)*erf(I*sqrt(z)),
a/(2*a - 1)*(polar_lift(-1)*z)**(-a)*
lowergamma(a, polar_lift(-1)*z),
a/(2*a - 1)*exp(z)]),
Matrix([[1, -1, 0]]),
Matrix([[-S.Half, 0, 1], [0, -a, 1], [0, 0, z]]))
# We make a "basis" of four functions instead of three, and give EulerGamma
# an extra slot (it could just be a coefficient to 1). The advantage is
# that this way Polys will not see multivariate polynomials (it treats
# EulerGamma as an indeterminate), which is *way* faster.
addb([1, 1], [2, 2],
Matrix([Ei(z) - log(z), exp(z), 1, EulerGamma]),
Matrix([[1/z, 0, 0, -1/z]]),
Matrix([[0, 1, -1, 0], [0, z, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]))
# 0F1
add((), (S.Half, ), cosh(2*sqrt(z)))
addb([], [b],
Matrix([gamma(b)*z**((1 - b)/2)*besseli(b - 1, 2*sqrt(z)),
gamma(b)*z**(1 - b/2)*besseli(b, 2*sqrt(z))]),
Matrix([[1, 0]]), Matrix([[0, 1], [z, (1 - b)]]))
# 0F3
x = 4*z**(S(1)/4)
def fp(a, z):
return besseli(a, x) + besselj(a, x)
def fm(a, z):
return besseli(a, x) - besselj(a, x)
# TODO branching
addb([], [S.Half, a, a + S.Half],
Matrix([fp(2*a - 1, z), fm(2*a, z)*z**(S(1)/4),
fm(2*a - 1, z)*sqrt(z), fp(2*a, z)*z**(S(3)/4)])
* 2**(-2*a)*gamma(2*a)*z**((1 - 2*a)/4),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, 1, 0, 0],
[0, S(1)/2 - a, 1, 0],
[0, 0, S(1)/2, 1],
[z, 0, 0, 1 - a]]))
x = 2*(4*z)**(S(1)/4)*exp_polar(I*pi/4)
addb([], [a, a + S.Half, 2*a],
(2*sqrt(polar_lift(-1)*z))**(1 - 2*a)*gamma(2*a)**2 *
Matrix([besselj(2*a - 1, x)*besseli(2*a - 1, x),
x*(besseli(2*a, x)*besselj(2*a - 1, x)
- besseli(2*a - 1, x)*besselj(2*a, x)),
x**2*besseli(2*a, x)*besselj(2*a, x),
x**3*(besseli(2*a, x)*besselj(2*a - 1, x)
+ besseli(2*a - 1, x)*besselj(2*a, x))]),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, S(1)/4, 0, 0],
[0, (1 - 2*a)/2, -S(1)/2, 0],
[0, 0, 1 - 2*a, S(1)/4],
[-32*z, 0, 0, 1 - a]]))
# 1F2
addb([a], [a - S.Half, 2*a],
Matrix([z**(S.Half - a)*besseli(a - S.Half, sqrt(z))**2,
z**(1 - a)*besseli(a - S.Half, sqrt(z))
*besseli(a - S(3)/2, sqrt(z)),
z**(S(3)/2 - a)*besseli(a - S(3)/2, sqrt(z))**2]),
Matrix([[-gamma(a + S.Half)**2/4**(S.Half - a),
2*gamma(a - S.Half)*gamma(a + S.Half)/4**(1 - a),
0]]),
Matrix([[1 - 2*a, 1, 0], [z/2, S.Half - a, S.Half], [0, z, 0]]))
addb([S.Half], [b, 2 - b],
pi*(1 - b)/sin(pi*b)*
Matrix([besseli(1 - b, sqrt(z))*besseli(b - 1, sqrt(z)),
sqrt(z)*(besseli(-b, sqrt(z))*besseli(b - 1, sqrt(z))
+ besseli(1 - b, sqrt(z))*besseli(b, sqrt(z))),
besseli(-b, sqrt(z))*besseli(b, sqrt(z))]),
Matrix([[1, 0, 0]]),
Matrix([[b - 1, S(1)/2, 0],
[z, 0, z],
[0, S(1)/2, -b]]))
addb([S(1)/2], [S(3)/2, S(3)/2],
Matrix([Shi(2*sqrt(z))/2/sqrt(z), sinh(2*sqrt(z))/2/sqrt(z),
cosh(2*sqrt(z))]),
Matrix([[1, 0, 0]]),
Matrix([[-S.Half, S.Half, 0], [0, -S.Half, S.Half], [0, 2*z, 0]]))
# FresnelS
# Basic rule
#add([S(3)/4], [S(3)/2,S(7)/4], 6*fresnels( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( pi * (exp(pi*I/4)*root(z,4)*2/sqrt(pi))**3 ) )
# Manually tuned rule
addb([S(3)/4], [S(3)/2, S(7)/4],
Matrix(
[ fresnels(
exp(
pi*I/4)*root(
z, 4)*2/sqrt(
pi) ) / (
pi * (exp(pi*I/4)*root(z, 4)*2/sqrt(pi))**3 ),
sinh(2*sqrt(z))/sqrt(z),
cosh(2*sqrt(z)) ]),
Matrix([[6, 0, 0]]),
Matrix([[-S(3)/4, S(1)/16, 0],
[ 0, -S(1)/2, 1],
[ 0, z, 0]]))
# FresnelC
# Basic rule
#add([S(1)/4], [S(1)/2,S(5)/4], fresnelc( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) / ( exp(pi*I/4)*root(z,4)*2/sqrt(pi) ) )
# Manually tuned rule
addb([S(1)/4], [S(1)/2, S(5)/4],
Matrix(
[ sqrt(
pi)*exp(
-I*pi/4)*fresnelc(
2*root(z, 4)*exp(I*pi/4)/sqrt(pi))/(2*root(z, 4)),
cosh(2*sqrt(z)),
sinh(2*sqrt(z))*sqrt(z) ]),
Matrix([[1, 0, 0]]),
Matrix([[-S(1)/4, S(1)/4, 0 ],
[ 0, 0, 1 ],
[ 0, z, S(1)/2]]))
# 2F3
# XXX instantiation of this five-parameter formula is pretty slow with the
# current Formula.find_instantiations (it creates 2!*3!*3**(2+3) ~ 3000
# instantiations ...), but it's not too bad.
addb([a, a + S.Half], [2*a, b, 2*a - b + 1],
gamma(b)*gamma(2*a - b + 1) * (sqrt(z)/2)**(1 - 2*a) *
Matrix([besseli(b - 1, sqrt(z))*besseli(2*a - b, sqrt(z)),
sqrt(z)*besseli(b, sqrt(z))*besseli(2*a - b, sqrt(z)),
sqrt(z)*besseli(b - 1, sqrt(z))*besseli(2*a - b + 1, sqrt(z)),
besseli(b, sqrt(z))*besseli(2*a - b + 1, sqrt(z))]),
Matrix([[1, 0, 0, 0]]),
Matrix([[0, S(1)/2, S(1)/2, 0],
[z/2, 1 - b, 0, z/2],
[z/2, 0, b - 2*a, z/2],
[0, S(1)/2, S(1)/2, -2*a]]))
# (Cf. the above comment about EulerGamma in the basis.)
addb([1, 1], [2, 2, S(3)/2],
Matrix([Chi(2*sqrt(z)) - log(2*sqrt(z)),
cosh(2*sqrt(z)), sqrt(z)*sinh(2*sqrt(z)), 1, EulerGamma]),
Matrix([[1/z, 0, 0, 0, -1/z]]),
Matrix([[0, S(1)/2, 0, -S(1)/2, 0],
[0, 0, 1, 0, 0],
[0, z, S(1)/2, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
# 3F3
# This is rule: http://functions.wolfram.com/07.31.03.0134.01
# Initial reason to add it was a nice solution for
# integrate(erf(a*z)/z**2, z) and same for erfc and erfi.
# Basic rule
# add([1, 1, a], [2, 2, a+1], (a/(z*(a-1)**2)) *
# (1 - (-z)**(1-a) * (gamma(a) - uppergamma(a,-z))
# - (a-1) * (EulerGamma + uppergamma(0,-z) + log(-z))
# - exp(z)))
# Manually tuned rule
addb([1, 1, a], [2, 2, a+1],
Matrix([a*(log(-z) + expint(1, -z) + EulerGamma)/(z*(a**2 - 2*a + 1)),
a*(-z)**(-a)*(gamma(a) - uppergamma(a, -z))/(a - 1)**2,
a*exp(z)/(a**2 - 2*a + 1),
a/(z*(a**2 - 2*a + 1))]),
Matrix([[1-a, 1, -1/z, 1]]),
Matrix([[-1,0,-1/z,1],
[0,-a,1,0],
[0,0,z,0],
[0,0,0,-1]]))
def add_meijerg_formulae(formulae):
from sympy.matrices import Matrix
a, b, c, z = list(map(Dummy, 'abcz'))
rho = Dummy('rho')
def add(an, ap, bm, bq, B, C, M, matcher):
formulae.append(MeijerFormula(an, ap, bm, bq, z, [a, b, c, rho],
B, C, M, matcher))
def detect_uppergamma(func):
x = func.an[0]
y, z = func.bm
swapped = False
if not _mod1((x - y).simplify()):
swapped = True
(y, z) = (z, y)
if _mod1((x - z).simplify()) or x - z > 0:
return None
l = [y, x]
if swapped:
l = [x, y]
return {rho: y, a: x - y}, G_Function([x], [], l, [])
add([a + rho], [], [rho, a + rho], [],
Matrix([gamma(1 - a)*z**rho*exp(z)*uppergamma(a, z),
gamma(1 - a)*z**(a + rho)]),
Matrix([[1, 0]]),
Matrix([[rho + z, -1], [0, a + rho]]),
detect_uppergamma)
def detect_3113(func):
"""http://functions.wolfram.com/07.34.03.0984.01"""
x = func.an[0]
u, v, w = func.bm
if _mod1((u - v).simplify()) == 0:
if _mod1((v - w).simplify()) == 0:
return
sig = (S(1)/2, S(1)/2, S(0))
x1, x2, y = u, v, w
else:
if _mod1((x - u).simplify()) == 0:
sig = (S(1)/2, S(0), S(1)/2)
x1, y, x2 = u, v, w
else:
sig = (S(0), S(1)/2, S(1)/2)
y, x1, x2 = u, v, w
if (_mod1((x - x1).simplify()) != 0 or
_mod1((x - x2).simplify()) != 0 or
_mod1((x - y).simplify()) != S(1)/2 or
x - x1 > 0 or x - x2 > 0):
return
return {a: x}, G_Function([x], [], [x - S(1)/2 + t for t in sig], [])
s = sin(2*sqrt(z))
c_ = cos(2*sqrt(z))
S_ = Si(2*sqrt(z)) - pi/2
C = Ci(2*sqrt(z))
add([a], [], [a, a, a - S(1)/2], [],
Matrix([sqrt(pi)*z**(a - S(1)/2)*(c_*S_ - s*C),
sqrt(pi)*z**a*(s*S_ + c_*C),
sqrt(pi)*z**a]),
Matrix([[-2, 0, 0]]),
Matrix([[a - S(1)/2, -1, 0], [z, a, S(1)/2], [0, 0, a]]),
detect_3113)
def make_simp(z):
""" Create a function that simplifies rational functions in ``z``. """
def simp(expr):
""" Efficiently simplify the rational function ``expr``. """
numer, denom = expr.as_numer_denom()
c, numer, denom = poly(numer, z).cancel(poly(denom, z))
return c * numer.as_expr() / denom.as_expr()
return simp
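# For instance (a sketch, with z a Symbol):
#   simp = make_simp(z)
#   simp((z**2 - 1)/(z - 1))  ->  z + 1
# Poly.cancel removes the common factor exactly, instead of relying on a
# generic simplify() call.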
def debug(*args):
if SYMPY_DEBUG:
for a in args:
print(a, end="")
print()
class Hyper_Function(Expr):
""" A generalized hypergeometric function. """
def __new__(cls, ap, bq):
obj = super(Hyper_Function, cls).__new__(cls)
obj.ap = Tuple(*list(map(expand, ap)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.ap, self.bq)
@property
def sizes(self):
return (len(self.ap), len(self.bq))
@property
def gamma(self):
"""
Number of upper parameters that are negative integers
This is a transformation invariant.
"""
return sum(bool(x.is_integer and x.is_negative) for x in self.ap)
def _hashable_content(self):
return super(Hyper_Function, self)._hashable_content() + (self.ap,
self.bq)
def __call__(self, arg):
return hyper(self.ap, self.bq, arg)
def build_invariants(self):
"""
Compute the invariant vector.
The invariant vector is:
(gamma, ((s1, n1), ..., (sk, nk)), ((t1, m1), ..., (tr, mr)))
where gamma is the number of integer a < 0,
s1 < ... < sk
nl is the number of parameters a_i congruent to sl mod 1
t1 < ... < tr
ml is the number of parameters b_i congruent to tl mod 1
If the index pair contains parameters, then this is not truly an
invariant, since the parameters cannot be sorted uniquely mod 1.
>>> from sympy.simplify.hyperexpand import Hyper_Function
>>> from sympy import S
>>> ap = (S(1)/2, S(1)/3, S(-1)/2, -2)
>>> bq = (1, 2)
Here gamma = 1,
k = 3, s1 = 0, s2 = 1/3, s3 = 1/2
n1 = 1, n2 = 1, n3 = 2
r = 1, t1 = 0
m1 = 2:
>>> Hyper_Function(ap, bq).build_invariants()
(1, ((0, 1), (1/3, 1), (1/2, 2)), ((0, 2),))
"""
abuckets, bbuckets = sift(self.ap, _mod1), sift(self.bq, _mod1)
def tr(bucket):
bucket = list(bucket.items())
if not any(isinstance(x[0], Mod) for x in bucket):
bucket.sort(key=lambda x: default_sort_key(x[0]))
bucket = tuple([(mod, len(values)) for mod, values in bucket if
values])
return bucket
return (self.gamma, tr(abuckets), tr(bbuckets))
def difficulty(self, func):
""" Estimate how many steps it takes to reach ``func`` from self.
Return -1 if impossible. """
if self.gamma != func.gamma:
return -1
oabuckets, obbuckets, abuckets, bbuckets = [sift(params, _mod1) for
params in (self.ap, self.bq, func.ap, func.bq)]
diff = 0
for bucket, obucket in [(abuckets, oabuckets), (bbuckets, obbuckets)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (not mod in bucket) or (not mod in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
return -1
l1 = list(bucket[mod])
l2 = list(obucket[mod])
l1.sort()
l2.sort()
for i, j in zip(l1, l2):
diff += abs(i - j)
return diff
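# Sketch of the metric: Hyper_Function((S(1)/2, 1), (2,)) and
# Hyper_Function((S(3)/2, 1), (3,)) have matching buckets mod 1 and differ
# by 1 + 0 + 1 = 2 unit shifts, so difficulty is 2; if any bucket fails to
# match (e.g. against Hyper_Function((S(1)/3,), (2,))), the result is -1.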
def _is_suitable_origin(self):
"""
Decide if ``self`` is a suitable origin.
A function is a suitable origin iff:
* none of the ai equals bj + n, with n a non-negative integer
* none of the ai is zero
* none of the bj is a non-positive integer
Note that this gives meaningful results only when none of the indices
are symbolic.
"""
for a in self.ap:
for b in self.bq:
if (a - b).is_integer and (a - b).is_negative is False:
return False
for a in self.ap:
if a == 0:
return False
for b in self.bq:
if b.is_integer and b.is_nonpositive:
return False
return True
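# For example (a sketch): Hyper_Function((S(1)/2,), (S(3)/2,)) is a
# suitable origin, while Hyper_Function((2,), (1,)) is not, because
# a - b = 1 is a non-negative integer, and Hyper_Function((0,), ())
# violates the second condition.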
class G_Function(Expr):
""" A Meijer G-function. """
def __new__(cls, an, ap, bm, bq):
obj = super(G_Function, cls).__new__(cls)
obj.an = Tuple(*list(map(expand, an)))
obj.ap = Tuple(*list(map(expand, ap)))
obj.bm = Tuple(*list(map(expand, bm)))
obj.bq = Tuple(*list(map(expand, bq)))
return obj
@property
def args(self):
return (self.an, self.ap, self.bm, self.bq)
def _hashable_content(self):
return super(G_Function, self)._hashable_content() + self.args
def __call__(self, z):
return meijerg(self.an, self.ap, self.bm, self.bq, z)
def compute_buckets(self):
"""
Compute buckets for the four sets of parameters.
We guarantee that any two equal Mod objects returned are actually the
same, and that the buckets are sorted by real part (an and bq
descending, bm and ap ascending).
Examples
========
>>> from sympy.simplify.hyperexpand import G_Function
>>> from sympy.abc import y
>>> from sympy import S, symbols
>>> a, b = [1, 3, 2, S(3)/2], [1 + y, y, 2, y + 3]
>>> G_Function(a, b, [2], [y]).compute_buckets()
({0: [3, 2, 1], 1/2: [3/2]},
{0: [2], y: [y, y + 1, y + 3]}, {0: [2]}, {y: [y]})
"""
dicts = pan, pap, pbm, pbq = [defaultdict(list) for i in range(4)]
for dic, lis in zip(dicts, (self.an, self.ap, self.bm, self.bq)):
for x in lis:
dic[_mod1(x)].append(x)
for dic, flip in zip(dicts, (True, False, False, True)):
for m, items in dic.items():
x0 = items[0]
items.sort(key=lambda x: x - x0, reverse=flip)
dic[m] = items
return tuple([dict(w) for w in dicts])
@property
def signature(self):
return (len(self.an), len(self.ap), len(self.bm), len(self.bq))
# Dummy variable.
_x = Dummy('x')
class Formula(object):
"""
This class represents hypergeometric formulae.
Its data members are:
- z, the argument
- closed_form, the closed form expression
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (see _compute_basis)
>>> from sympy.abc import a, b, z
>>> from sympy.simplify.hyperexpand import Formula, Hyper_Function
>>> func = Hyper_Function((a/2, a/3 + b, (1+a)/2), (a, b, (a+b)/7))
>>> f = Formula(func, z, None, [a, b])
"""
def _compute_basis(self, closed_form):
"""
Compute a set of functions B=(f1, ..., fn), a nxn matrix M
and a 1xn matrix C such that:
closed_form = C B
z d/dz B = M B.
"""
from sympy.matrices import Matrix, eye, zeros
afactors = [_x + a for a in self.func.ap]
bfactors = [_x + b - 1 for b in self.func.bq]
expr = _x*Mul(*bfactors) - self.z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree() - 1
b = [closed_form]
for _ in range(n):
b.append(self.z*b[-1].diff(self.z))
self.B = Matrix(b)
self.C = Matrix([[1] + [0]*n])
m = eye(n)
m = m.col_insert(0, zeros(n, 1))
l = poly.all_coeffs()[1:]
l.reverse()
self.M = m.row_insert(n, -Matrix([l])/poly.all_coeffs()[0])
def __init__(self, func, z, res, symbols, B=None, C=None, M=None):
z = sympify(z)
res = sympify(res)
symbols = [x for x in sympify(symbols) if func.has(x)]
self.z = z
self.symbols = symbols
self.B = B
self.C = C
self.M = M
self.func = func
# TODO with symbolic parameters, it could be advantageous
# (for prettier answers) to compute a basis only *after*
# instantiation
if res is not None:
self._compute_basis(res)
@property
def closed_form(self):
return (self.C*self.B)[0]
def find_instantiations(self, func):
"""
Find substitutions of the free symbols that match ``func``.
Return the substitution dictionaries as a list. Note that the returned
instantiations need not actually match, or be valid!
"""
from sympy.solvers import solve
ap = func.ap
bq = func.bq
if len(ap) != len(self.func.ap) or len(bq) != len(self.func.bq):
raise TypeError('Cannot instantiate other number of parameters')
symbol_values = []
for a in self.symbols:
if a in self.func.ap.args:
symbol_values.append(ap)
elif a in self.func.bq.args:
symbol_values.append(bq)
else:
raise ValueError("At least one of the parameters of the "
"formula must be equal to %s" % (a,))
base_repl = [dict(list(zip(self.symbols, values)))
for values in product(*symbol_values)]
abuckets, bbuckets = [sift(params, _mod1) for params in [ap, bq]]
a_inv, b_inv = [dict((a, len(vals)) for a, vals in bucket.items())
for bucket in [abuckets, bbuckets]]
critical_values = [[0] for _ in self.symbols]
result = []
_n = Dummy()
for repl in base_repl:
symb_a, symb_b = [sift(params, lambda x: _mod1(x.xreplace(repl)))
for params in [self.func.ap, self.func.bq]]
for bucket, obucket in [(abuckets, symb_a), (bbuckets, symb_b)]:
for mod in set(list(bucket.keys()) + list(obucket.keys())):
if (not mod in bucket) or (not mod in obucket) \
or len(bucket[mod]) != len(obucket[mod]):
break
for a, vals in zip(self.symbols, critical_values):
if repl[a].free_symbols:
continue
exprs = [expr for expr in obucket[mod] if expr.has(a)]
repl0 = repl.copy()
repl0[a] += _n
for expr in exprs:
for target in bucket[mod]:
n0, = solve(expr.xreplace(repl0) - target, _n)
if n0.free_symbols:
raise ValueError("Value should not be true")
vals.append(n0)
else:
values = []
for a, vals in zip(self.symbols, critical_values):
a0 = repl[a]
min_ = floor(min(vals))
max_ = ceiling(max(vals))
values.append([a0 + n for n in range(min_, max_ + 1)])
result.extend(dict(list(zip(self.symbols, l))) for l in product(*values))
return result
class FormulaCollection(object):
""" A collection of formulae to use as origins. """
def __init__(self):
""" Doing this globally at module init time is a pain ... """
self.symbolic_formulae = {}
self.concrete_formulae = {}
self.formulae = []
add_formulae(self.formulae)
# Now process the formulae into a helpful form.
# These dicts are indexed by (p, q).
for f in self.formulae:
sizes = f.func.sizes
if len(f.symbols) > 0:
self.symbolic_formulae.setdefault(sizes, []).append(f)
else:
inv = f.func.build_invariants()
self.concrete_formulae.setdefault(sizes, {})[inv] = f
def lookup_origin(self, func):
"""
Given the suitable target ``func``, try to find an origin in our
knowledge base.
>>> from sympy.simplify.hyperexpand import (FormulaCollection,
... Hyper_Function)
>>> f = FormulaCollection()
>>> f.lookup_origin(Hyper_Function((), ())).closed_form
exp(_z)
>>> f.lookup_origin(Hyper_Function([1], ())).closed_form
HyperRep_power1(-1, _z)
>>> from sympy import S
>>> i = Hyper_Function([S('1/4'), S('3/4 + 4')], [S.Half])
>>> f.lookup_origin(i).closed_form
HyperRep_sqrts1(-1/4, _z)
"""
inv = func.build_invariants()
sizes = func.sizes
if sizes in self.concrete_formulae and \
inv in self.concrete_formulae[sizes]:
return self.concrete_formulae[sizes][inv]
# We don't have a concrete formula. Try to instantiate.
if not sizes in self.symbolic_formulae:
return None # Too bad...
possible = []
for f in self.symbolic_formulae[sizes]:
repls = f.find_instantiations(func)
for repl in repls:
func2 = f.func.xreplace(repl)
if not func2._is_suitable_origin():
continue
diff = func2.difficulty(func)
if diff == -1:
continue
possible.append((diff, repl, f, func2))
# find the nearest origin
possible.sort(key=lambda x: x[0])
for _, repl, f, func2 in possible:
f2 = Formula(func2, f.z, None, [], f.B.subs(repl),
f.C.subs(repl), f.M.subs(repl))
if not any(e.has(S.NaN, oo, -oo, zoo) for e in [f2.B, f2.M, f2.C]):
return f2
else:
return None
class MeijerFormula(object):
"""
This class represents a Meijer G-function formula.
Its data members are:
- z, the argument
- symbols, the free symbols (parameters) in the formula
- func, the function
- B, C, M (cf. the ordinary Formula class)
"""
def __init__(self, an, ap, bm, bq, z, symbols, B, C, M, matcher):
an, ap, bm, bq = [Tuple(*list(map(expand, w))) for w in [an, ap, bm, bq]]
self.func = G_Function(an, ap, bm, bq)
self.z = z
self.symbols = symbols
self._matcher = matcher
self.B = B
self.C = C
self.M = M
@property
def closed_form(self):
return (self.C*self.B)[0]
def try_instantiate(self, func):
"""
Try to instantiate the current formula to (almost) match func.
This uses the _matcher passed on init.
"""
if func.signature != self.func.signature:
return None
res = self._matcher(func)
if res is not None:
subs, newfunc = res
return MeijerFormula(newfunc.an, newfunc.ap, newfunc.bm, newfunc.bq,
self.z, [],
self.B.subs(subs), self.C.subs(subs),
self.M.subs(subs), None)
class MeijerFormulaCollection(object):
"""
This class holds a collection of meijer g formulae.
"""
def __init__(self):
formulae = []
add_meijerg_formulae(formulae)
self.formulae = defaultdict(list)
for formula in formulae:
self.formulae[formula.func.signature].append(formula)
self.formulae = dict(self.formulae)
def lookup_origin(self, func):
""" Try to find a formula that matches func. """
if not func.signature in self.formulae:
return None
for formula in self.formulae[func.signature]:
res = formula.try_instantiate(func)
if res is not None:
return res
class Operator(object):
"""
Base class for operators to be applied to our functions.
These operators are differential operators. They are by convention
expressed in the variable D = z*d/dz (although this base class does
not actually care).
Note that when the operator is applied to an object, we typically do
*not* blindly differentiate but instead use a different representation
of the z*d/dz operator (see make_derivative_operator).
To subclass from this, define an __init__ method that initialises a
self._poly variable. This variable stores a polynomial. By convention
the generator is z*d/dz, and acts to the right of all coefficients.
Thus this poly
x**2 + 2*z*x + 1
represents the differential operator
(z*d/dz)**2 + 2*z**2*d/dz + 1.
This class is used only in the implementation of the hypergeometric
function expansion algorithm.
"""
def apply(self, obj, op):
"""
Apply ``self`` to the object ``obj``, where the generator is ``op``.
>>> from sympy.simplify.hyperexpand import Operator
>>> from sympy.polys.polytools import Poly
>>> from sympy.abc import x, y, z
>>> op = Operator()
>>> op._poly = Poly(x**2 + z*x + y, x)
>>> op.apply(z**7, lambda f: f.diff(z))
y*z**7 + 7*z**7 + 42*z**5
"""
coeffs = self._poly.all_coeffs()
coeffs.reverse()
diffs = [obj]
for c in coeffs[1:]:
diffs.append(op(diffs[-1]))
r = coeffs[0]*diffs[0]
for c, d in zip(coeffs[1:], diffs[1:]):
r += c*d
return r
class MultOperator(Operator):
""" Simply multiply by a "constant" """
def __init__(self, p):
self._poly = Poly(p, _x)
class ShiftA(Operator):
""" Increment an upper index. """
def __init__(self, ai):
ai = sympify(ai)
if ai == 0:
raise ValueError('Cannot increment zero upper index.')
self._poly = Poly(_x/ai + 1, _x)
def __str__(self):
return '<Increment upper %s.>' % (1/self._poly.all_coeffs()[0])
class ShiftB(Operator):
""" Decrement a lower index. """
def __init__(self, bi):
bi = sympify(bi)
if bi == 1:
raise ValueError('Cannot decrement unit lower index.')
self._poly = Poly(_x/(bi - 1) + 1, _x)
def __str__(self):
return '<Decrement lower %s.>' % (1/self._poly.all_coeffs()[0] + 1)
class UnShiftA(Operator):
""" Decrement an upper index. """
def __init__(self, ap, bq, i, z):
""" Note: i counts from zero! """
ap, bq, i = list(map(sympify, [ap, bq, i]))
self._ap = ap
self._bq = bq
self._i = i
ap = list(ap)
bq = list(bq)
ai = ap.pop(i) - 1
if ai == 0:
raise ValueError('Cannot decrement unit upper index.')
m = Poly(z*ai, _x)
for a in ap:
m *= Poly(_x + a, _x)
A = Dummy('A')
n = D = Poly(ai*A - ai, A)
for b in bq:
n *= (D + b - 1)
b0 = -n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper index: '
'cancels with lower')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, _x/ai + 1), _x)
self._poly = Poly((n - m)/b0, _x)
def __str__(self):
return '<Decrement upper index #%s of %s, %s.>' % (self._i,
self._ap, self._bq)
class UnShiftB(Operator):
""" Increment a lower index. """
def __init__(self, ap, bq, i, z):
""" Note: i counts from zero! """
ap, bq, i = list(map(sympify, [ap, bq, i]))
self._ap = ap
self._bq = bq
self._i = i
ap = list(ap)
bq = list(bq)
bi = bq.pop(i) + 1
if bi == 0:
raise ValueError('Cannot increment -1 lower index.')
m = Poly(_x*(bi - 1), _x)
for b in bq:
m *= Poly(_x + b - 1, _x)
B = Dummy('B')
D = Poly((bi - 1)*B - bi + 1, B)
n = Poly(z, B)
for a in ap:
n *= (D + a)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment index: cancels with upper')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, _x/(bi - 1) + 1), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower index #%s of %s, %s.>' % (self._i,
self._ap, self._bq)
class MeijerShiftA(Operator):
""" Increment an upper b index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(bi - _x, _x)
def __str__(self):
return '<Increment upper b=%s.>' % (self._poly.all_coeffs()[1])
class MeijerShiftB(Operator):
""" Decrement an upper a index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(1 - bi + _x, _x)
def __str__(self):
return '<Decrement upper a=%s.>' % (1 - self._poly.all_coeffs()[1])
class MeijerShiftC(Operator):
""" Increment a lower b index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(-bi + _x, _x)
def __str__(self):
return '<Increment lower b=%s.>' % (-self._poly.all_coeffs()[1])
class MeijerShiftD(Operator):
""" Decrement a lower a index. """
def __init__(self, bi):
bi = sympify(bi)
self._poly = Poly(bi - 1 - _x, _x)
def __str__(self):
return '<Decrement lower a=%s.>' % (self._poly.all_coeffs()[1] + 1)
class MeijerUnShiftA(Operator):
""" Decrement an upper b index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bm.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
A = Dummy('A')
D = Poly(bi - A, A)
n = Poly(z, A)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, bi - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement upper b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftB(Operator):
""" Increment an upper a index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = an.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B')
D = Poly(B + ai - 1, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment upper a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, 1 - ai + _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment upper a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftC(Operator):
""" Decrement a lower b index. """
# XXX this is "essentially" the same as MeijerUnShiftA. This "essentially"
# can be made rigorous using the functional equation G(1/z) = G'(z),
# where G' denotes a G function of slightly altered parameters.
# However, sorting out the details seems harder than just coding it
# again.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bq.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
C = Dummy('C')
D = Poly(bi + C, C)
n = Poly(z, C)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement lower b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], C).as_expr().subs(C, _x - bi), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement lower b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class MeijerUnShiftD(Operator):
""" Increment a lower a index. """
# XXX This is essentially the same as MeijerUnShiftA.
# See comment at MeijerUnShiftC.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = ap.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B') # - this is the shift operator `D_I`
D = Poly(ai - 1 - B, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment lower a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, ai - 1 - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class ReduceOrder(Operator):
""" Reduce Order by cancelling an upper and a lower index. """
def __new__(cls, ai, bj):
""" For convenience if reduction is not possible, return None. """
ai = sympify(ai)
bj = sympify(bj)
n = ai - bj
if not n.is_Integer or n < 0:
return None
if bj.is_integer and bj <= 0 and bj + n - 1 >= 0:
return None
expr = Operator.__new__(cls)
p = S(1)
for k in range(n):
p *= (_x + bj + k)/(bj + k)
expr._poly = Poly(p, _x)
expr._a = ai
expr._b = bj
return expr
@classmethod
def _meijer(cls, b, a, sign):
""" Cancel b + sign*s and a + sign*s
This is for meijer G functions. """
b = sympify(b)
a = sympify(a)
n = b - a
if n.is_negative or not n.is_Integer:
return None
expr = Operator.__new__(cls)
p = S(1)
for k in range(n):
p *= (sign*_x + a + k)
expr._poly = Poly(p, _x)
if sign == -1:
expr._a = b
expr._b = a
else:
expr._b = Add(1, a - 1, evaluate=False)
expr._a = Add(1, b - 1, evaluate=False)
return expr
@classmethod
def meijer_minus(cls, b, a):
return cls._meijer(b, a, -1)
@classmethod
def meijer_plus(cls, a, b):
return cls._meijer(1 - a, 1 - b, 1)
def __str__(self):
return '<Reduce order by cancelling upper %s with lower %s.>' % \
(self._a, self._b)
def _reduce_order(ap, bq, gen, key):
""" Order reduction algorithm used in Hypergeometric and Meijer G """
ap = list(ap)
bq = list(bq)
ap.sort(key=key)
bq.sort(key=key)
nap = []
# we will edit bq in place
operators = []
for a in ap:
op = None
for i in range(len(bq)):
op = gen(a, bq[i])
if op is not None:
bq.pop(i)
break
if op is None:
nap.append(a)
else:
operators.append(op)
return nap, bq, operators
def reduce_order(func):
"""
Given the hypergeometric function ``func``, find a sequence of operators
that reduces the order as much as possible.
Return (newfunc, [operators]), where applying the operators to the
hypergeometric function newfunc yields func.
Examples
========
>>> from sympy.simplify.hyperexpand import reduce_order, Hyper_Function
>>> reduce_order(Hyper_Function((1, 2), (3, 4)))
(Hyper_Function((1, 2), (3, 4)), [])
>>> reduce_order(Hyper_Function((1,), (1,)))
(Hyper_Function((), ()), [<Reduce order by cancelling upper 1 with lower 1.>])
>>> reduce_order(Hyper_Function((2, 4), (3, 3)))
(Hyper_Function((2,), (3,)), [<Reduce order by cancelling
upper 4 with lower 3.>])
"""
nap, nbq, operators = _reduce_order(func.ap, func.bq, ReduceOrder, default_sort_key)
return Hyper_Function(Tuple(*nap), Tuple(*nbq)), operators
def reduce_order_meijer(func):
"""
Given the Meijer G function parameters, ``func``, find a sequence of
operators that reduces order as much as possible.
Return newfunc, [operators].
Examples
========
>>> from sympy.simplify.hyperexpand import (reduce_order_meijer,
... G_Function)
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 2]))[0]
G_Function((4, 3), (5, 6), (3, 4), (2, 1))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [3, 4], [1, 8]))[0]
G_Function((3,), (5, 6), (3, 4), (1,))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [1, 5]))[0]
G_Function((3,), (), (), (1,))
>>> reduce_order_meijer(G_Function([3, 4], [5, 6], [7, 5], [5, 3]))[0]
G_Function((), (), (), ())
"""
nan, nbq, ops1 = _reduce_order(func.an, func.bq, ReduceOrder.meijer_plus,
lambda x: default_sort_key(-x))
nbm, nap, ops2 = _reduce_order(func.bm, func.ap, ReduceOrder.meijer_minus,
default_sort_key)
return G_Function(nan, nap, nbm, nbq), ops1 + ops2
def make_derivative_operator(M, z):
""" Create a derivative operator, to be passed to Operator.apply. """
def doit(C):
r = z*C.diff(z) + C*M
r = r.applyfunc(make_simp(z))
return r
return doit
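# Why this works (a sketch): the basis B satisfies z*dB/dz = M*B, so for a
# row vector C of rational functions
#     z*d/dz (C*B) = (z*dC/dz)*B + C*(z*dB/dz) = (z*dC/dz + C*M)*B,
# which is exactly the coefficient update that ``doit`` computes.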
def apply_operators(obj, ops, op):
"""
Apply the list of operators ``ops`` to object ``obj``, substituting
``op`` for the generator.
"""
res = obj
for o in reversed(ops):
res = o.apply(res, op)
return res
def devise_plan(target, origin, z):
"""
Devise a plan (consisting of shift and un-shift operators) to be applied
to the hypergeometric function ``target`` to yield ``origin``.
Returns a list of operators.
>>> from sympy.simplify.hyperexpand import devise_plan, Hyper_Function
>>> from sympy.abc import z
Nothing to do:
>>> devise_plan(Hyper_Function((1, 2), ()), Hyper_Function((1, 2), ()), z)
[]
>>> devise_plan(Hyper_Function((), (1, 2)), Hyper_Function((), (1, 2)), z)
[]
Very simple plans:
>>> devise_plan(Hyper_Function((2,), ()), Hyper_Function((1,), ()), z)
[<Increment upper 1.>]
>>> devise_plan(Hyper_Function((), (2,)), Hyper_Function((), (1,)), z)
[<Increment lower index #0 of [], [1].>]
Several buckets:
>>> from sympy import S
>>> devise_plan(Hyper_Function((1, S.Half), ()),
... Hyper_Function((2, S('3/2')), ()), z) #doctest: +NORMALIZE_WHITESPACE
[<Decrement upper index #0 of [3/2, 1], [].>,
<Decrement upper index #0 of [2, 3/2], [].>]
A slightly more complicated plan:
>>> devise_plan(Hyper_Function((1, 3), ()), Hyper_Function((2, 2), ()), z)
[<Increment upper 2.>, <Decrement upper index #0 of [2, 2], [].>]
Another more complicated plan: (note that the ap have to be shifted first!)
>>> devise_plan(Hyper_Function((1, -1), (2,)), Hyper_Function((3, -2), (4,)), z)
[<Decrement lower 3.>, <Decrement lower 4.>,
<Decrement upper index #1 of [-1, 2], [4].>,
<Decrement upper index #1 of [-1, 3], [4].>, <Increment upper -2.>]
"""
abuckets, bbuckets, nabuckets, nbbuckets = [sift(params, _mod1) for
params in (target.ap, target.bq, origin.ap, origin.bq)]
if len(list(abuckets.keys())) != len(list(nabuckets.keys())) or \
len(list(bbuckets.keys())) != len(list(nbbuckets.keys())):
raise ValueError('%s not reachable from %s' % (target, origin))
ops = []
def do_shifts(fro, to, inc, dec):
ops = []
for i in range(len(fro)):
if to[i] - fro[i] > 0:
sh = inc
ch = 1
else:
sh = dec
ch = -1
while to[i] != fro[i]:
ops += [sh(fro, i)]
fro[i] += ch
return ops
def do_shifts_a(nal, nbk, al, aother, bother):
""" Shift us from (nal, nbk) to (al, nbk). """
return do_shifts(nal, al, lambda p, i: ShiftA(p[i]),
lambda p, i: UnShiftA(p + aother, nbk + bother, i, z))
def do_shifts_b(nal, nbk, bk, aother, bother):
""" Shift us from (nal, nbk) to (nal, bk). """
return do_shifts(nbk, bk,
lambda p, i: UnShiftB(nal + aother, p + bother, i, z),
lambda p, i: ShiftB(p[i]))
for r in sorted(list(abuckets.keys()) + list(bbuckets.keys()), key=default_sort_key):
al = ()
nal = ()
bk = ()
nbk = ()
if r in abuckets:
al = abuckets[r]
nal = nabuckets[r]
if r in bbuckets:
bk = bbuckets[r]
nbk = nbbuckets[r]
if len(al) != len(nal) or len(bk) != len(nbk):
raise ValueError('%s not reachable from %s' % (target, origin))
al, nal, bk, nbk = [sorted(list(w), key=default_sort_key)
for w in [al, nal, bk, nbk]]
def others(dic, key):
l = []
for k, value in dic.items():
if k != key:
l += list(dic[k])
return l
aother = others(nabuckets, r)
bother = others(nbbuckets, r)
if len(al) == 0:
# there can be no complications, just shift the bs as we please
ops += do_shifts_b([], nbk, bk, aother, bother)
elif len(bk) == 0:
# there can be no complications, just shift the as as we please
ops += do_shifts_a(nal, [], al, aother, bother)
else:
namax = nal[-1]
amax = al[-1]
if nbk[0] - namax <= 0 or bk[0] - amax <= 0:
raise ValueError('Non-suitable parameters.')
if namax - amax > 0:
# we are going to shift down - first do the as, then the bs
ops += do_shifts_a(nal, nbk, al, aother, bother)
ops += do_shifts_b(al, nbk, bk, aother, bother)
else:
# we are going to shift up - first do the bs, then the as
ops += do_shifts_b(nal, nbk, bk, aother, bother)
ops += do_shifts_a(nal, bk, al, aother, bother)
nabuckets[r] = al
nbbuckets[r] = bk
ops.reverse()
return ops
def try_shifted_sum(func, z):
""" Try to recognise a hypergeometric sum that starts from k > 0. """
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
if len(abuckets[S(0)]) != 1:
return None
r = abuckets[S(0)][0]
if r <= 0:
return None
if not S(0) in bbuckets:
return None
l = list(bbuckets[S(0)])
l.sort()
k = l[0]
if k <= 0:
return None
nap = list(func.ap)
nap.remove(r)
nbq = list(func.bq)
nbq.remove(k)
k -= 1
nap = [x - k for x in nap]
nbq = [x - k for x in nbq]
ops = []
for n in range(r - 1):
ops.append(ShiftA(n + 1))
ops.reverse()
fac = factorial(k)/z**k
for a in nap:
fac /= rf(a, k)
for b in nbq:
fac *= rf(b, k)
ops += [MultOperator(fac)]
p = 0
for n in range(k):
m = z**n/factorial(n)
for a in nap:
m *= rf(a, n)
for b in nbq:
m /= rf(b, n)
p += m
return Hyper_Function(nap, nbq), ops, -p
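# For instance (a sketch): for Hyper_Function((2,), (3,)) the 0-buckets give
# r = 2 and k = 3, so the series is re-based to start at index 0 and the
# function returns (reduced Hyper_Function, shift operators, minus the
# finitely many leading terms p that were split off).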
def try_polynomial(func, z):
""" Recognise polynomial cases. Returns None if not such a case.
Requires order to be fully reduced. """
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
a0 = abuckets[S(0)]
b0 = bbuckets[S(0)]
a0.sort()
b0.sort()
al0 = [x for x in a0 if x <= 0]
bl0 = [x for x in b0 if x <= 0]
if bl0:
return oo
if not al0:
return None
a = al0[-1]
fac = 1
res = S(1)
for n in Tuple(*list(range(-a))):
fac *= z
fac /= n + 1
for a in func.ap:
fac *= a + n
for b in func.bq:
fac /= b + n
res += fac
return res
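# E.g. (a sketch, with b a positive Symbol): hyper([-1], [b], z) terminates
# after two terms, and
#   try_polynomial(Hyper_Function((-1,), (b,)), z)  ->  1 - z/b
# while a non-positive integer among the bq makes the series undefined and
# oo is returned.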
def try_lerchphi(func):
"""
Try to find an expression for Hyper_Function ``func`` in terms of Lerch
Transcendents.
Return None if no such expression can be found.
"""
# This is actually quite simple, and is described in Roach's paper,
# section 18.
# We don't need to implement the reduction to polylog here, this
# is handled by expand_func.
from sympy.matrices import Matrix, zeros
from sympy.polys import apart
# First we need to figure out if the summation coefficient is a rational
# function of the summation index, and construct that rational function.
abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)
paired = {}
for key, value in abuckets.items():
if key != 0 and not key in bbuckets:
return None
bvalue = bbuckets[key]
paired[key] = (list(value), list(bvalue))
bbuckets.pop(key, None)
if bbuckets != {}:
return None
if not S(0) in abuckets:
return None
aints, bints = paired[S(0)]
# Account for the additional n! in denominator
paired[S(0)] = (aints, bints + [1])
t = Dummy('t')
numer = S(1)
denom = S(1)
for key, (avalue, bvalue) in paired.items():
if len(avalue) != len(bvalue):
return None
# Note that since order has been reduced fully, all the b are
# bigger than all the a they differ from by an integer. In particular
# if there are any negative b left, this function is not well-defined.
for a, b in zip(avalue, bvalue):
if (a - b).is_positive:
k = a - b
numer *= rf(b + t, k)
denom *= rf(b, k)
else:
k = b - a
numer *= rf(a, k)
denom *= rf(a + t, k)
# Now do a partial fraction decomposition.
# We assemble two structures: a list monomials of pairs (a, b) representing
# a*t**b (b a non-negative integer), and a dict terms, where
# terms[a] = [(b, c)] means that there is a term b/(t-a)**c.
part = apart(numer/denom, t)
args = Add.make_args(part)
monomials = []
terms = {}
for arg in args:
numer, denom = arg.as_numer_denom()
if not denom.has(t):
p = Poly(numer, t)
if not p.is_monomial:
raise TypeError("p should be monomial")
((b, ), a) = p.LT()
monomials += [(a/denom, b)]
continue
if numer.has(t):
raise NotImplementedError('Need partial fraction decomposition'
' with linear denominators')
indep, [dep] = denom.as_coeff_mul(t)
n = 1
if dep.is_Pow:
n = dep.exp
dep = dep.base
if dep == t:
a = S(0)
elif dep.is_Add:
a, tmp = dep.as_independent(t)
b = 1
if tmp != t:
b, _ = tmp.as_independent(t)
if dep != b*t + a:
raise NotImplementedError('unrecognised form %s' % dep)
a /= b
indep *= b**n
else:
raise NotImplementedError('unrecognised form of partial fraction')
terms.setdefault(a, []).append((numer/indep, n))
# Now that we have this information, assemble our formula. All the
# monomials yield rational functions and go into one basis element.
# The terms[a] are related by differentiation. If the largest exponent is
# n, we need lerchphi(z, k, a) for k = 1, 2, ..., n.
# deriv maps a basis to its derivative, expressed as a C(z)-linear
# combination of other basis elements.
deriv = {}
coeffs = {}
z = Dummy('z')
monomials.sort(key=lambda x: x[1])
mon = {0: 1/(1 - z)}
if monomials:
for k in range(monomials[-1][1]):
mon[k + 1] = z*mon[k].diff(z)
for a, n in monomials:
coeffs.setdefault(S(1), []).append(a*mon[n])
for a, l in terms.items():
for c, k in l:
coeffs.setdefault(lerchphi(z, k, a), []).append(c)
l.sort(key=lambda x: x[1])
for k in range(2, l[-1][1] + 1):
deriv[lerchphi(z, k, a)] = [(-a, lerchphi(z, k, a)),
(1, lerchphi(z, k - 1, a))]
deriv[lerchphi(z, 1, a)] = [(-a, lerchphi(z, 1, a)),
(1/(1 - z), S(1))]
trans = {}
for n, b in enumerate([S(1)] + list(deriv.keys())):
trans[b] = n
basis = [expand_func(b) for (b, _) in sorted(list(trans.items()),
key=lambda x:x[1])]
B = Matrix(basis)
C = Matrix([[0]*len(B)])
for b, c in coeffs.items():
C[trans[b]] = Add(*c)
M = zeros(len(B))
for b, l in deriv.items():
for c, b2 in l:
M[trans[b], trans[b2]] = c
return Formula(func, z, None, [], B, C, M)
def build_hypergeometric_formula(func):
"""
Create a formula object representing the hypergeometric function ``func``.
"""
# We know that no `ap` are negative integers, otherwise "detect poly"
# would have kicked in. However, `ap` could be empty. In this case we can
# use a different basis.
# I'm not aware of a basis that works in all cases.
from sympy import zeros, Matrix, eye
z = Dummy('z')
if func.ap:
afactors = [_x + a for a in func.ap]
bfactors = [_x + b - 1 for b in func.bq]
expr = _x*Mul(*bfactors) - z*Mul(*afactors)
poly = Poly(expr, _x)
n = poly.degree()
basis = []
M = zeros(n)
for k in range(n):
a = func.ap[0] + k
basis += [hyper([a] + list(func.ap[1:]), func.bq, z)]
if k < n - 1:
M[k, k] = -a
M[k, k + 1] = a
B = Matrix(basis)
C = Matrix([[1] + [0]*(n - 1)])
derivs = [eye(n)]
for k in range(n):
derivs.append(M*derivs[k])
l = poly.all_coeffs()
l.reverse()
res = [0]*n
for k, c in enumerate(l):
for r, d in enumerate(C*derivs[k]):
res[r] += c*d
for k, c in enumerate(res):
M[n - 1, k] = -c/derivs[n - 1][0, n - 1]/poly.all_coeffs()[0]
return Formula(func, z, None, [], B, C, M)
else:
# Since there are no `ap`, none of the `bq` can be non-positive
# integers.
basis = []
bq = list(func.bq[:])
for i in range(len(bq)):
basis += [hyper([], bq, z)]
bq[i] += 1
basis += [hyper([], bq, z)]
B = Matrix(basis)
n = len(B)
C = Matrix([[1] + [0]*(n - 1)])
M = zeros(n)
M[0, n - 1] = z/Mul(*func.bq)
for k in range(1, n):
M[k, k - 1] = func.bq[k - 1]
M[k, k] = -func.bq[k - 1]
return Formula(func, z, None, [], B, C, M)
def hyperexpand_special(ap, bq, z):
"""
Try to find a closed-form expression for hyper(ap, bq, z), where ``z``
is supposed to be a "special" value, e.g. 1.
This function tries various of the classical summation formulae
(Gauss, Saalschuetz, etc).
"""
# This code is very ad-hoc. There are many clever algorithms
# (notably Zeilberger's) related to this problem.
# For now we just want a few simple cases to work.
p, q = len(ap), len(bq)
z_ = z
z = unpolarify(z)
if z == 0:
return S.One
if p == 2 and q == 1:
# 2F1
a, b, c = ap + bq
if z == 1:
# Gauss
return gamma(c - a - b)*gamma(c)/gamma(c - a)/gamma(c - b)
if z == -1 and simplify(b - a + c) == 1:
b, a = a, b
if z == -1 and simplify(a - b + c) == 1:
# Kummer
if b.is_integer and b.is_negative:
return 2*cos(pi*b/2)*gamma(-b)*gamma(b - a + 1) \
/gamma(-b/2)/gamma(b/2 - a + 1)
else:
return gamma(b/2 + 1)*gamma(b - a + 1) \
/gamma(b + 1)/gamma(b/2 - a + 1)
# TODO tons of more formulae
# investigate what algorithms exist
return hyper(ap, bq, z_)
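# A quick check of the Gauss branch (a sketch):
#   hyperexpand_special((S(1), S(1)), (S(3),), 1)
# evaluates gamma(1)*gamma(3)/(gamma(2)*gamma(2)) = 2, the classical value
# of 2F1(1, 1; 3; 1).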
_collection = None
def _hyperexpand(func, z, ops0=[], z0=Dummy('z0'), premult=1, prem=0,
rewrite='default'):
"""
Try to find an expression for the hypergeometric function ``func``.
The result is expressed in terms of a dummy variable z0. Then it
is multiplied by premult. Then ops0 is applied.
premult must be a*z**prem for some a independent of z.
"""
if z is S.Zero:
return S.One
z = polarify(z, subs=False)
if rewrite == 'default':
rewrite = 'nonrepsmall'
def carryout_plan(f, ops):
C = apply_operators(f.C.subs(f.z, z0), ops,
make_derivative_operator(f.M.subs(f.z, z0), z0))
from sympy import eye
C = apply_operators(C, ops0,
make_derivative_operator(f.M.subs(f.z, z0)
+ prem*eye(f.M.shape[0]), z0))
if premult == 1:
C = C.applyfunc(make_simp(z0))
r = C*f.B.subs(f.z, z0)*premult
res = r[0].subs(z0, z)
if rewrite:
res = res.rewrite(rewrite)
return res
# TODO
# The following would be possible:
# *) PFD Duplication (see Kelly Roach's paper)
# *) In a similar spirit, try_lerchphi() can be generalised considerably.
global _collection
if _collection is None:
_collection = FormulaCollection()
debug('Trying to expand hypergeometric function ', func)
# First reduce order as much as possible.
func, ops = reduce_order(func)
if ops:
debug(' Reduced order to ', func)
else:
debug(' Could not reduce order.')
# Now try polynomial cases
res = try_polynomial(func, z0)
if res is not None:
debug(' Recognised polynomial.')
p = apply_operators(res, ops, lambda f: z0*f.diff(z0))
p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0))
return unpolarify(simplify(p).subs(z0, z))
# Try to recognise a shifted sum.
p = S(0)
res = try_shifted_sum(func, z0)
if res is not None:
func, nops, p = res
debug(' Recognised shifted sum, reduced order to ', func)
ops += nops
# apply the plan for poly
p = apply_operators(p, ops, lambda f: z0*f.diff(z0))
p = apply_operators(p*premult, ops0, lambda f: z0*f.diff(z0))
p = simplify(p).subs(z0, z)
# Try special expansions early.
if unpolarify(z) in [1, -1] and (len(func.ap), len(func.bq)) == (2, 1):
f = build_hypergeometric_formula(func)
r = carryout_plan(f, ops).replace(hyper, hyperexpand_special)
if not r.has(hyper):
return r + p
# Try to find a formula in our collection
formula = _collection.lookup_origin(func)
# Now try a lerch phi formula
if formula is None:
formula = try_lerchphi(func)
if formula is None:
debug(' Could not find an origin. ',
'Will return answer in terms of '
'simpler hypergeometric functions.')
formula = build_hypergeometric_formula(func)
debug(' Found an origin: ', formula.closed_form, ' ', formula.func)
# We need to find the operators that convert formula into func.
ops += devise_plan(func, formula.func, z0)
# Now carry out the plan.
r = carryout_plan(formula, ops) + p
return powdenest(r, polar=True).replace(hyper, hyperexpand_special)
def devise_plan_meijer(fro, to, z):
"""
Find operators to convert G-function ``fro`` into G-function ``to``.
It is assumed that fro and to have the same signatures, and that in fact
any corresponding pair of parameters differs by integers, and a direct path
is possible. I.e. if there are parameters a1 b1 c1 and a2 b2 c2 it is
assumed that a1 can be shifted to a2, etc. The only thing this routine
determines is the order of shifts to apply, nothing clever will be tried.
It is also assumed that fro is suitable.
>>> from sympy.simplify.hyperexpand import (devise_plan_meijer,
... G_Function)
>>> from sympy.abc import z
Empty plan:
>>> devise_plan_meijer(G_Function([1], [2], [3], [4]),
... G_Function([1], [2], [3], [4]), z)
[]
Very simple plans:
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([1], [], [], []), z)
[<Increment upper a index #0 of [0], [], [], [].>]
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([-1], [], [], []), z)
[<Decrement upper a=0.>]
>>> devise_plan_meijer(G_Function([], [1], [], []),
... G_Function([], [2], [], []), z)
[<Increment lower a index #0 of [], [1], [], [].>]
Slightly more complicated plans:
>>> devise_plan_meijer(G_Function([0], [], [], []),
... G_Function([2], [], [], []), z)
[<Increment upper a index #0 of [1], [], [], [].>,
<Increment upper a index #0 of [0], [], [], [].>]
>>> devise_plan_meijer(G_Function([0], [], [0], []),
... G_Function([-1], [], [1], []), z)
[<Increment upper b=0.>, <Decrement upper a=0.>]
Order matters:
>>> devise_plan_meijer(G_Function([0], [], [0], []),
... G_Function([1], [], [1], []), z)
[<Increment upper a index #0 of [0], [], [1], [].>, <Increment upper b=0.>]
"""
# TODO for now, we use the following simple heuristic: inverse-shift
# when possible, shift otherwise. Give up if we cannot make progress.
def try_shift(f, t, shifter, diff, counter):
""" Try to apply ``shifter`` in order to bring some element in ``f``
nearer to its counterpart in ``to``. ``diff`` is +/- 1 and
        determines the effect of ``shifter``. ``counter`` is a list of
        elements blocking the shift.
Return an operator if change was possible, else None.
"""
for idx, (a, b) in enumerate(zip(f, t)):
if (
(a - b).is_integer and (b - a)/diff > 0 and
all(a != x for x in counter)):
sh = shifter(idx)
f[idx] += diff
return sh
fan = list(fro.an)
fap = list(fro.ap)
fbm = list(fro.bm)
fbq = list(fro.bq)
ops = []
change = True
while change:
change = False
op = try_shift(fan, to.an,
lambda i: MeijerUnShiftB(fan, fap, fbm, fbq, i, z),
1, fbm + fbq)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fap, to.ap,
lambda i: MeijerUnShiftD(fan, fap, fbm, fbq, i, z),
1, fbm + fbq)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbm, to.bm,
lambda i: MeijerUnShiftA(fan, fap, fbm, fbq, i, z),
-1, fan + fap)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbq, to.bq,
lambda i: MeijerUnShiftC(fan, fap, fbm, fbq, i, z),
-1, fan + fap)
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fan, to.an, lambda i: MeijerShiftB(fan[i]), -1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fap, to.ap, lambda i: MeijerShiftD(fap[i]), -1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbm, to.bm, lambda i: MeijerShiftA(fbm[i]), 1, [])
if op is not None:
ops += [op]
change = True
continue
op = try_shift(fbq, to.bq, lambda i: MeijerShiftC(fbq[i]), 1, [])
if op is not None:
ops += [op]
change = True
continue
if fan != list(to.an) or fap != list(to.ap) or fbm != list(to.bm) or \
fbq != list(to.bq):
raise NotImplementedError('Could not devise plan.')
ops.reverse()
return ops
_meijercollection = None
def _meijergexpand(func, z0, allow_hyper=False, rewrite='default'):
"""
Try to find an expression for the Meijer G function specified
by the G_Function ``func``. If ``allow_hyper`` is True, then returning
an expression in terms of hypergeometric functions is allowed.
    Currently this just applies Slater's theorem.
"""
global _meijercollection
if _meijercollection is None:
_meijercollection = MeijerFormulaCollection()
if rewrite == 'default':
rewrite = None
func0 = func
debug('Try to expand Meijer G function corresponding to ', func)
# We will play games with analytic continuation - rather use a fresh symbol
z = Dummy('z')
func, ops = reduce_order_meijer(func)
if ops:
debug(' Reduced order to ', func)
else:
debug(' Could not reduce order.')
# Try to find a direct formula
f = _meijercollection.lookup_origin(func)
if f is not None:
debug(' Found a Meijer G formula: ', f.func)
ops += devise_plan_meijer(f.func, func, z)
# Now carry out the plan.
C = apply_operators(f.C.subs(f.z, z), ops,
make_derivative_operator(f.M.subs(f.z, z), z))
C = C.applyfunc(make_simp(z))
r = C*f.B.subs(f.z, z)
r = r[0].subs(z, z0)
return powdenest(r, polar=True)
debug(" Could not find a direct formula. Trying Slater's theorem.")
# TODO the following would be possible:
# *) Paired Index Theorems
# *) PFD Duplication
# (See Kelly Roach's paper for details on either.)
#
# TODO Also, we tend to create combinations of gamma functions that can be
# simplified.
def can_do(pbm, pap):
""" Test if slater applies. """
for i in pbm:
if len(pbm[i]) > 1:
l = 0
if i in pap:
l = len(pap[i])
if l + 1 < len(pbm[i]):
return False
return True
def do_slater(an, bm, ap, bq, z, zfinal):
# zfinal is the value that will eventually be substituted for z.
# We pass it to _hyperexpand to improve performance.
func = G_Function(an, bm, ap, bq)
_, pbm, pap, _ = func.compute_buckets()
if not can_do(pbm, pap):
return S(0), False
cond = len(an) + len(ap) < len(bm) + len(bq)
if len(an) + len(ap) == len(bm) + len(bq):
cond = abs(z) < 1
if cond is False:
return S(0), False
res = S(0)
for m in pbm:
if len(pbm[m]) == 1:
bh = pbm[m][0]
fac = 1
bo = list(bm)
bo.remove(bh)
for bj in bo:
fac *= gamma(bj - bh)
for aj in an:
fac *= gamma(1 + bh - aj)
for bj in bq:
fac /= gamma(1 + bh - bj)
for aj in ap:
fac /= gamma(aj - bh)
nap = [1 + bh - a for a in list(an) + list(ap)]
nbq = [1 + bh - b for b in list(bo) + list(bq)]
k = polar_lift(S(-1)**(len(ap) - len(bm)))
harg = k*zfinal
# NOTE even though k "is" +-1, this has to be t/k instead of
# t*k ... we are using polar numbers for consistency!
premult = (t/k)**bh
hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops,
t, premult, bh, rewrite=None)
res += fac * hyp
else:
b_ = pbm[m][0]
ki = [bi - b_ for bi in pbm[m][1:]]
u = len(ki)
li = [ai - b_ for ai in pap[m][:u + 1]]
bo = list(bm)
for b in pbm[m]:
bo.remove(b)
ao = list(ap)
for a in pap[m][:u]:
ao.remove(a)
lu = li[-1]
di = [l - k for (l, k) in zip(li, ki)]
# We first work out the integrand:
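                # (A hedged gloss: this is the Mellin-Barnes integrand of
                # the G-function,
                #   z**s * prod(gamma(b - s) for b in bm)
                #        * prod(gamma(1 - a + s) for a in an)
                #        / prod(gamma(1 - b + s) for b in bq)
                #        / prod(gamma(a - s) for a in ap),
                # mirroring the four loops below.)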
s = Dummy('s')
integrand = z**s
for b in bm:
integrand *= gamma(b - s)
for a in an:
integrand *= gamma(1 - a + s)
for b in bq:
integrand /= gamma(1 - b + s)
for a in ap:
integrand /= gamma(a - s)
# Now sum the finitely many residues:
# XXX This speeds up some cases - is it a good idea?
integrand = expand_func(integrand)
for r in range(lu):
resid = residue(integrand, s, b_ + r)
resid = apply_operators(resid, ops, lambda f: z*f.diff(z))
res -= resid
# Now the hypergeometric term.
au = b_ + lu
k = polar_lift(S(-1)**(len(ao) + len(bo) + 1))
harg = k*zfinal
premult = (t/k)**au
nap = [1 + au - a for a in list(an) + list(ap)] + [1]
nbq = [1 + au - b for b in list(bm) + list(bq)]
hyp = _hyperexpand(Hyper_Function(nap, nbq), harg, ops,
t, premult, au, rewrite=None)
C = S(-1)**(lu)/factorial(lu)
for i in range(u):
C *= S(-1)**di[i]/rf(lu - li[i] + 1, di[i])
for a in an:
C *= gamma(1 - a + au)
for b in bo:
C *= gamma(b - au)
for a in ao:
C /= gamma(a - au)
for b in bq:
C /= gamma(1 - b + au)
res += C*hyp
return res, cond
t = Dummy('t')
slater1, cond1 = do_slater(func.an, func.bm, func.ap, func.bq, z, z0)
def tr(l):
return [1 - x for x in l]
for op in ops:
op._poly = Poly(op._poly.subs({z: 1/t, _x: -_x}), _x)
slater2, cond2 = do_slater(tr(func.bm), tr(func.an), tr(func.bq), tr(func.ap),
t, 1/z0)
slater1 = powdenest(slater1.subs(z, z0), polar=True)
slater2 = powdenest(slater2.subs(t, 1/z0), polar=True)
if not isinstance(cond2, bool):
cond2 = cond2.subs(t, 1/z)
m = func(z)
if m.delta > 0 or \
(m.delta == 0 and len(m.ap) == len(m.bq) and
(re(m.nu) < -1) is not False and polar_lift(z0) == polar_lift(1)):
# The condition delta > 0 means that the convergence region is
# connected. Any expression we find can be continued analytically
# to the entire convergence region.
# The conditions delta==0, p==q, re(nu) < -1 imply that G is continuous
# on the positive reals, so the values at z=1 agree.
if cond1 is not False:
cond1 = True
if cond2 is not False:
cond2 = True
if cond1 is True:
slater1 = slater1.rewrite(rewrite or 'nonrep')
else:
slater1 = slater1.rewrite(rewrite or 'nonrepsmall')
if cond2 is True:
slater2 = slater2.rewrite(rewrite or 'nonrep')
else:
slater2 = slater2.rewrite(rewrite or 'nonrepsmall')
if not isinstance(cond1, bool):
cond1 = cond1.subs(z, z0)
if not isinstance(cond2, bool):
cond2 = cond2.subs(z, z0)
def weight(expr, cond):
if cond is True:
c0 = 0
elif cond is False:
c0 = 1
else:
c0 = 2
if expr.has(oo, zoo, -oo, nan):
# XXX this actually should not happen, but consider
# S('meijerg(((0, -1/2, 0, -1/2, 1/2), ()), ((0,),
# (-1/2, -1/2, -1/2, -1)), exp_polar(I*pi))/4')
c0 = 3
return (c0, expr.count(hyper), expr.count_ops())
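    # Weight tuples compare lexicographically, so min(w1, w2) <= (0, 1, oo)
    # accepts a candidate only when its condition is True (c0 == 0) and it
    # contains at most one hypergeometric function.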
w1 = weight(slater1, cond1)
w2 = weight(slater2, cond2)
if min(w1, w2) <= (0, 1, oo):
if w1 < w2:
return slater1
else:
return slater2
if max(w1[0], w2[0]) <= 1 and max(w1[1], w2[1]) <= 1:
return Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True))
# We couldn't find an expression without hypergeometric functions.
# TODO it would be helpful to give conditions under which the integral
# is known to diverge.
r = Piecewise((slater1, cond1), (slater2, cond2), (func0(z0), True))
if r.has(hyper) and not allow_hyper:
debug(' Could express using hypergeometric functions, '
'but not allowed.')
if not r.has(hyper) or allow_hyper:
return r
return func0(z0)
def hyperexpand(f, allow_hyper=False, rewrite='default'):
"""
Expand hypergeometric functions. If allow_hyper is True, allow partial
simplification (that is a result different from input,
but still containing hypergeometric functions).
Examples
========
>>> from sympy.simplify.hyperexpand import hyperexpand
>>> from sympy.functions import hyper
>>> from sympy.abc import z
>>> hyperexpand(hyper([], [], z))
exp(z)
    Non-hypergeometric parts of the expression and hypergeometric expressions
that are not recognised are left unchanged:
>>> hyperexpand(1 + hyper([1, 1, 1], [], z))
hyper((1, 1, 1), (), z) + 1
"""
f = sympify(f)
def do_replace(ap, bq, z):
r = _hyperexpand(Hyper_Function(ap, bq), z, rewrite=rewrite)
if r is None:
return hyper(ap, bq, z)
else:
return r
def do_meijer(ap, bq, z):
r = _meijergexpand(G_Function(ap[0], ap[1], bq[0], bq[1]), z,
allow_hyper, rewrite=rewrite)
if not r.has(nan, zoo, oo, -oo):
return r
return f.replace(hyper, do_replace).replace(meijerg, do_meijer)
|
Nexenta/cinder | refs/heads/master | cinder/tests/unit/scheduler/test_scheduler.py | 1 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.message import defined_messages
from cinder import objects
from cinder.objects import fields
from cinder.scheduler import driver
from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
from cinder import test
from cinder.tests.unit.consistencygroup import fake_consistencygroup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'cinder.scheduler.driver.Scheduler'
class AnException(Exception):
pass
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.manager = self.manager_cls()
self.manager._startup_delay = False
self.context = context.get_admin_context()
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertIsInstance(manager.driver, self.driver_cls)
@mock.patch('eventlet.sleep')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities')
def test_init_host_with_rpc(self, publish_capabilities_mock, sleep_mock):
self.manager._startup_delay = True
self.manager.init_host_with_rpc()
publish_capabilities_mock.assert_called_once_with(mock.ANY)
sleep_mock.assert_called_once_with(CONF.periodic_interval)
self.assertFalse(self.manager._startup_delay)
@mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'})
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': '1.4'})
def test_reset(self, get_min_obj, get_min_rpc):
mgr = self.manager_cls()
volume_rpcapi = mgr.driver.volume_rpcapi
self.assertEqual('1.3', volume_rpcapi.client.version_cap)
self.assertEqual('1.4',
volume_rpcapi.client.serializer._base.version_cap)
get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
mgr.reset()
volume_rpcapi = mgr.driver.volume_rpcapi
self.assertEqual(get_min_rpc.return_value,
volume_rpcapi.client.version_cap)
self.assertEqual(get_min_obj.return_value,
volume_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)
@mock.patch('cinder.scheduler.driver.Scheduler.'
'update_service_capabilities')
def test_update_service_capabilities_empty_dict(self, _mock_update_cap):
# Test no capabilities passes empty dictionary
service = 'fake_service'
host = 'fake_host'
self.manager.update_service_capabilities(self.context,
service_name=service,
host=host)
_mock_update_cap.assert_called_once_with(service, host, {})
@mock.patch('cinder.scheduler.driver.Scheduler.'
'update_service_capabilities')
def test_update_service_capabilities_correct(self, _mock_update_cap):
# Test capabilities passes correctly
service = 'fake_service'
host = 'fake_host'
capabilities = {'fake_capability': 'fake_value'}
self.manager.update_service_capabilities(self.context,
service_name=service,
host=host,
capabilities=capabilities)
_mock_update_cap.assert_called_once_with(service, host, capabilities)
@mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
@mock.patch('cinder.message.api.API.create')
@mock.patch('cinder.db.volume_update')
def test_create_volume_exception_puts_volume_in_error_state(
self, _mock_volume_update, _mock_message_create,
_mock_sched_create):
# Test NoValidHost exception behavior for create_volume.
# Puts the volume in 'error' state and eats the exception.
_mock_sched_create.side_effect = exception.NoValidHost(reason="")
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'
request_spec = {'volume_id': volume.id,
'volume': {'id': volume.id, '_name_id': None,
'metadata': {}, 'admin_metadata': {},
'glance_metadata': {}}}
request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={},
volume=volume)
_mock_volume_update.assert_called_once_with(self.context,
volume.id,
{'status': 'error'})
_mock_sched_create.assert_called_once_with(self.context,
request_spec_obj, {})
_mock_message_create.assert_called_once_with(
self.context, defined_messages.UNABLE_TO_ALLOCATE,
self.context.project_id, resource_type='VOLUME',
resource_uuid=volume.id)
@mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
@mock.patch('eventlet.sleep')
def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create):
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'
request_spec = {'volume_id': volume.id}
request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={},
volume=volume)
_mock_sched_create.assert_called_once_with(self.context,
request_spec_obj, {})
self.assertFalse(_mock_sleep.called)
@mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
@mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
@mock.patch('eventlet.sleep')
def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep,
_mock_is_ready,
_mock_sched_create):
self.manager._startup_delay = True
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'
request_spec = {'volume_id': volume.id}
request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
_mock_is_ready.side_effect = [False, False, True]
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={},
volume=volume)
_mock_sched_create.assert_called_once_with(self.context,
request_spec_obj, {})
calls = [mock.call(1)] * 2
_mock_sleep.assert_has_calls(calls)
self.assertEqual(2, _mock_sleep.call_count)
@mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
@mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
@mock.patch('eventlet.sleep')
def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep,
_mock_is_ready,
_mock_sched_create):
self.manager._startup_delay = True
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'
request_spec = {'volume_id': volume.id}
request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
_mock_is_ready.return_value = True
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={},
volume=volume)
_mock_sched_create.assert_called_once_with(self.context,
request_spec_obj, {})
self.assertFalse(_mock_sleep.called)
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
@mock.patch('cinder.db.volume_update')
def test_migrate_volume_exception_returns_volume_state(
self, _mock_volume_update, _mock_host_passes,
_mock_volume_get):
# Test NoValidHost exception behavior for migrate_volume_to_host.
# Puts the volume in 'error_migrating' state and eats the exception.
fake_updates = {'migration_status': 'error'}
self._test_migrate_volume_exception_returns_volume_state(
_mock_volume_update, _mock_host_passes, _mock_volume_get,
'available', fake_updates)
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
@mock.patch('cinder.db.volume_update')
def test_migrate_volume_exception_returns_volume_state_maintenance(
self, _mock_volume_update, _mock_host_passes,
_mock_volume_get):
fake_updates = {'status': 'available',
'migration_status': 'error'}
self._test_migrate_volume_exception_returns_volume_state(
_mock_volume_update, _mock_host_passes, _mock_volume_get,
'maintenance', fake_updates)
def _test_migrate_volume_exception_returns_volume_state(
self, _mock_volume_update, _mock_host_passes,
_mock_volume_get, status, fake_updates):
volume = tests_utils.create_volume(self.context,
status=status,
previous_status='available')
fake_volume_id = volume.id
topic = 'fake_topic'
request_spec = {'volume_id': fake_volume_id}
_mock_host_passes.side_effect = exception.NoValidHost(reason="")
_mock_volume_get.return_value = volume
self.manager.migrate_volume_to_host(self.context, topic,
fake_volume_id, 'host', True,
request_spec=request_spec,
filter_properties={},
volume=volume)
_mock_volume_update.assert_called_once_with(self.context,
fake_volume_id,
fake_updates)
_mock_host_passes.assert_called_once_with(self.context, 'host',
request_spec, {})
@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.db.volume_attachment_get_all_by_volume_id')
@mock.patch('cinder.quota.QUOTAS.rollback')
def test_retype_volume_exception_returns_volume_state(
self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update):
# Test NoValidHost exception behavior for retype.
# Puts the volume in original state and eats the exception.
volume = tests_utils.create_volume(self.context,
status='retyping',
previous_status='in-use')
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume_attach = tests_utils.attach_volume(self.context, volume.id,
instance_uuid, None,
'/dev/fake')
_mock_vol_attachment_get.return_value = [volume_attach]
topic = 'fake_topic'
reservations = mock.sentinel.reservations
request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3},
'migration_policy': 'on-demand',
'quota_reservations': reservations}
_mock_vol_update.return_value = {'status': 'in-use'}
_mock_find_retype_host = mock.Mock(
side_effect=exception.NoValidHost(reason=""))
orig_retype = self.manager.driver.find_retype_host
self.manager.driver.find_retype_host = _mock_find_retype_host
self.manager.retype(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={},
volume=volume)
_mock_find_retype_host.assert_called_once_with(self.context,
request_spec, {},
'on-demand')
quota_rollback.assert_called_once_with(self.context, reservations)
_mock_vol_update.assert_called_once_with(self.context, volume.id,
{'status': 'in-use'})
self.manager.driver.find_retype_host = orig_retype
def test_create_consistencygroup_exceptions(self):
with mock.patch.object(filter_scheduler.FilterScheduler,
'schedule_create_consistencygroup') as mock_cg:
original_driver = self.manager.driver
consistencygroup_obj = \
fake_consistencygroup.fake_consistencyobject_obj(self.context)
self.manager.driver = filter_scheduler.FilterScheduler
LOG = self.mock_object(manager, 'LOG')
self.mock_object(db, 'consistencygroup_update')
ex = exception.CinderException('test')
mock_cg.side_effect = ex
group_id = fake.CONSISTENCY_GROUP_ID
self.assertRaises(exception.CinderException,
self.manager.create_consistencygroup,
self.context,
'volume',
consistencygroup_obj)
self.assertGreater(LOG.exception.call_count, 0)
db.consistencygroup_update.assert_called_once_with(
self.context, group_id, {'status': (
fields.ConsistencyGroupStatus.ERROR)})
mock_cg.reset_mock()
LOG.exception.reset_mock()
db.consistencygroup_update.reset_mock()
mock_cg.side_effect = exception.NoValidHost(
reason="No weighed hosts available")
self.manager.create_consistencygroup(
self.context, 'volume', consistencygroup_obj)
self.assertGreater(LOG.error.call_count, 0)
db.consistencygroup_update.assert_called_once_with(
self.context, group_id, {'status': (
fields.ConsistencyGroupStatus.ERROR)})
self.manager.driver = original_driver
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.driver = self.driver_cls()
self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
self.topic = 'fake_topic'
@mock.patch('cinder.scheduler.driver.Scheduler.'
'update_service_capabilities')
def test_update_service_capabilities(self, _mock_update_cap):
service_name = 'fake_service'
host = 'fake_host'
capabilities = {'fake_capability': 'fake_value'}
self.driver.update_service_capabilities(service_name, host,
capabilities)
_mock_update_cap.assert_called_once_with(service_name, host,
capabilities)
@mock.patch('cinder.scheduler.host_manager.HostManager.'
'has_all_capabilities', return_value=False)
def test_is_ready(self, _mock_has_caps):
ready = self.driver.is_ready()
_mock_has_caps.assert_called_once_with()
self.assertFalse(ready)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test schedule driver class.
Test cases for base scheduler driver class methods
that will fail if the driver is changed.
"""
def test_unimplemented_schedule(self):
fake_args = (1, 2, 3)
fake_kwargs = {'cat': 'meow'}
self.assertRaises(NotImplementedError, self.driver.schedule,
self.context, self.topic, 'schedule_something',
*fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
"""Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.objects.volume.Volume.get_by_id')
def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update):
volume = fake_volume.fake_volume_obj(self.context)
_mock_volume_get.return_value = volume
driver.volume_update_db(self.context, volume.id, 'fake_host')
scheduled_at = volume.scheduled_at.replace(tzinfo=None)
_mock_vol_update.assert_called_once_with(
self.context, volume.id, {'host': 'fake_host',
'scheduled_at': scheduled_at})
|
Lekanich/intellij-community | refs/heads/master | python/helpers/python-skeletons/lettuce/__init__.py | 70 | # coding=utf-8
__author__ = 'Ilya.Kazakevich'
|
fengbaicanhe/intellij-community | refs/heads/master | python/testData/codeInsight/controlflow/listcomp.py | 83 | [(x, y) for k, v in params.items()
if k
for x, y in v
if x > y]
|
wanghe4096/website | refs/heads/master | website/models.py | 1 | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
import datetime
# Create your models here.
def datetime_now():
return datetime.datetime.now()
class Organization(models.Model):
organization_name = models.CharField(max_length=128, unique=True)
domain_name = models.CharField(max_length=256, unique=True)
sentry_instance = models.CharField(max_length=256)
class UserDetail(models.Model):
email = models.EmailField(max_length=256, )
password = models.CharField(max_length=128, null=True, blank=True)
phone = models.CharField(max_length=12, null=True)
name = models.CharField(max_length=256, null=True)
company = models.CharField(max_length=256, null=True)
server_count = models.IntegerField(null=True)
user = models.ForeignKey(User)
org_name = models.CharField(max_length=128, null=True)
domain_name = models.CharField(max_length=128, null=True)
def send_activation_email(self, site):
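        # Stub: activation-email delivery is not implemented yet.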
pass
class SentryInstance(models.Model):
sentry_instance_name = models.CharField(max_length=128, unique=True)
sentry_instance_url_prefix = models.CharField(max_length=250)
client_id = models.CharField(max_length=512, null=True)
client_secret = models.CharField(max_length=512, null=True)
|
mpihlak/skytools-dev | refs/heads/master | python/pgq/__init__.py | 1 | """PgQ framework for Python."""
__pychecker__ = 'no-miximport'
import pgq.event
import pgq.consumer
import pgq.coopconsumer
import pgq.remoteconsumer
import pgq.producer
import pgq.ticker
import pgq.maint
import pgq.status
import pgq.cascade
import pgq.cascade.nodeinfo
import pgq.cascade.admin
import pgq.cascade.consumer
import pgq.cascade.worker
from pgq.event import *
from pgq.consumer import *
from pgq.coopconsumer import *
from pgq.remoteconsumer import *
from pgq.producer import *
from pgq.ticker import *
from pgq.maint import *
from pgq.status import *
from pgq.cascade.nodeinfo import *
from pgq.cascade.admin import *
from pgq.cascade.consumer import *
from pgq.cascade.worker import *
__all__ = (
pgq.event.__all__ +
pgq.consumer.__all__ +
pgq.coopconsumer.__all__ +
pgq.remoteconsumer.__all__ +
pgq.cascade.nodeinfo.__all__ +
pgq.cascade.admin.__all__ +
pgq.cascade.consumer.__all__ +
pgq.cascade.worker.__all__ +
pgq.producer.__all__ +
pgq.ticker.__all__ +
pgq.maint.__all__ +
pgq.status.__all__ )
|
inim4/googletest | refs/heads/master | test/gtest_xml_outfiles_test.py | 2526 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
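    # Each removal below is wrapped in try/except os.error so that files or
    # directories missing after an earlier (possibly failed) run are ignored.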
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
exocad/exotrac | refs/heads/master | trac/admin/tests/console.py | 2 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Tim Moloney <t.moloney@verizon.net>
import difflib
import inspect
import os
import re
import sys
import unittest
from StringIO import StringIO
# IAdminCommandProvider implementations
import trac.admin.api
import trac.attachment
import trac.perm
import trac.ticket.admin
import trac.versioncontrol.admin
import trac.versioncontrol.api
import trac.versioncontrol.web_ui
import trac.wiki.admin
# IPermissionRequestor implementations (for 'permission' related tests)
import trac.about
import trac.admin.web_ui
import trac.config
import trac.ticket.api
import trac.ticket.report
import trac.ticket.roadmap
import trac.ticket.web_ui
import trac.search.web_ui
import trac.timeline.web_ui
import trac.wiki.web_ui
from trac.admin.api import AdminCommandManager, IAdminCommandProvider, \
console_date_format, get_console_locale
from trac.admin.console import TracAdmin, TracAdminHelpMacro
from trac.core import Component, implements
from trac.test import EnvironmentStub
from trac.util.datefmt import format_date, get_date_format_hint, \
get_datetime_format_hint
from trac.util.translation import get_available_locales, has_babel
from trac.web.tests.session import _prep_session_table
STRIP_TRAILING_SPACE = re.compile(r'( +)$', re.MULTILINE)
def load_expected_results(file, pattern):
"""Reads the file, named file, which contains test results separated by the
regular expression pattern.
The test results are returned as a dictionary.
"""
    expected = {}
    test = None
compiled_pattern = re.compile(pattern)
f = open(file, 'r')
for line in f:
line = line.rstrip().decode('utf-8')
match = compiled_pattern.search(line)
if match:
test = match.groups()[0]
expected[test] = ''
        elif test is not None:
            expected[test] += line + '\n'
f.close()
return expected
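# An illustrative sketch (hypothetical file contents): given a file holding
#
#     ===== test_foo =====
#     expected output line
#
# load_expected_results(path, '===== (test_[^ ]+) =====') returns
# {'test_foo': u'expected output line\n'}.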
def execute_cmd(tracadmin, cmd, strip_trailing_space=True, input=None):
_in = sys.stdin
_err = sys.stderr
_out = sys.stdout
try:
if input:
sys.stdin = StringIO(input.encode('utf-8'))
sys.stdin.encoding = 'utf-8' # fake input encoding
sys.stderr = sys.stdout = out = StringIO()
out.encoding = 'utf-8' # fake output encoding
retval = None
try:
retval = tracadmin.onecmd(cmd)
except SystemExit:
pass
value = out.getvalue()
if isinstance(value, str): # reverse what print_listing did
value = value.decode('utf-8')
if strip_trailing_space:
return retval, STRIP_TRAILING_SPACE.sub('', value)
else:
return retval, value
finally:
sys.stdin = _in
sys.stderr = _err
sys.stdout = _out
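# A minimal usage sketch (assuming an already-configured TracAdmin instance
# ``admin``): rv, out = execute_cmd(admin, 'help') returns the command's
# return value together with its captured, decoded output.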
class TracadminTestCase(unittest.TestCase):
"""
Tests the output of trac-admin and is meant to be used with
.../trac/tests.py.
"""
expected_results_file = os.path.join(os.path.dirname(__file__),
'console-tests.txt')
expected_results = load_expected_results(expected_results_file,
'===== (test_[^ ]+) =====')
def setUp(self):
self.env = EnvironmentStub(default_data=True, enable=('trac.*',),
disable=('trac.tests.*',))
self._admin = TracAdmin()
self._admin.env_set('', self.env)
# Set test date to 11th Jan 2004
self._test_date = '2004-01-11'
def tearDown(self):
self.env = None
def _execute(self, cmd, strip_trailing_space=True, input=None):
return execute_cmd(self._admin, cmd,
strip_trailing_space=strip_trailing_space,
input=input)
@property
def _datetime_format_hint(self):
return get_datetime_format_hint(get_console_locale(self.env))
def _get_command_help(self, *args):
docs = AdminCommandManager(self.env).get_command_help(list(args))
self.assertEqual(1, len(docs))
return docs[0][2]
def assertExpectedResult(self, output, args=None):
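        # inspect.stack()[1] is the caller's frame record, and index 3 of a
        # frame record is its function name; each test is therefore keyed by
        # the name of the test method that called this helper.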
test_name = inspect.stack()[1][3]
expected_result = self.expected_results[test_name]
if args is not None:
expected_result %= args
self.assertEqual(expected_result, output)
def assertEqual(self, expected_results, output, msg=None):
""":deprecated: since 1.0.2, use `assertExpectedResult` instead."""
if not (isinstance(expected_results, basestring) and
isinstance(output, basestring)):
return unittest.TestCase.assertEqual(self, expected_results,
output, msg)
def diff():
# Create a useful delta between the output and the expected output
output_lines = ['%s\n' % x for x in output.split('\n')]
expected_lines = ['%s\n' % x for x in expected_results.split('\n')]
return ''.join(difflib.unified_diff(expected_lines, output_lines,
'expected', 'actual'))
if '[...]' in expected_results:
m = re.match(expected_results.replace('[...]', '.*'), output,
re.MULTILINE)
unittest.TestCase.assertTrue(self, m,
"%r != %r\n%s" % (expected_results,
output, diff()))
else:
unittest.TestCase.assertEqual(self, expected_results, output,
"%r != %r\n%s" % (expected_results,
output, diff()))
# Help test
def test_help_ok(self):
"""
Tests the 'help' command in trac-admin. Since the 'help' command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
from trac import __version__
rv, output = self._execute('help')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'version': __version__,
'date_format_hint': get_date_format_hint()
})
self.assertTrue(all(len(line) < 80 for line in output.split('\n')),
"Lines should be less than 80 characters in length.")
# Locale test
def _test_get_console_locale_with_babel(self):
from babel.core import Locale, UnknownLocaleError
locales = get_available_locales()
en_US = Locale.parse('en_US')
de = Locale.parse('de')
de_DE = Locale.parse('de_DE')
try:
default = Locale.default()
except UnknownLocaleError:
default = None
language = self.env.config.get('trac', 'default_language')
try:
self.assertEqual(default, get_console_locale(None, None))
self.env.config.set('trac', 'default_language', '')
if 'de' in locales:
self.assertEqual(de, get_console_locale(None, 'de_DE.UTF8'))
self.env.config.set('trac', 'default_language', 'de')
self.assertEqual(de, get_console_locale(self.env, None))
self.assertEqual(de, get_console_locale(self.env, 'C'))
self.env.config.set('trac', 'default_language', 'en_US')
self.assertEqual(en_US, get_console_locale(self.env, None))
self.assertEqual(en_US, get_console_locale(self.env, 'C'))
self.assertEqual(de, get_console_locale(self.env,
'de_DE.UTF8'))
if not locales: # compiled catalog is missing
self.assertEqual(default, get_console_locale(None,
'de_DE.UTF8'))
self.env.config.set('trac', 'default_language', 'de')
self.assertEqual(default, get_console_locale(self.env, None))
self.assertEqual(default, get_console_locale(self.env, 'C'))
self.env.config.set('trac', 'default_language', 'en_US')
self.assertEqual(en_US, get_console_locale(self.env, None))
self.assertEqual(en_US, get_console_locale(self.env, 'C'))
self.assertEqual(en_US, get_console_locale(self.env,
'de_DE.UTF8'))
finally:
self.env.config.set('trac', 'default_language', language)
def _test_get_console_locale_without_babel(self):
self.assertEqual(None, get_console_locale(None, 'en_US.UTF8'))
language = self.env.config.get('trac', 'default_language')
try:
self.env.config.set('trac', 'default_language', 'en_US')
self.assertEqual(None, get_console_locale(self.env, 'en_US.UTF8'))
finally:
self.env.config.set('trac', 'default_language', language)
if has_babel:
test_get_console_locale = _test_get_console_locale_with_babel
else:
test_get_console_locale = _test_get_console_locale_without_babel
# Attachment tests
def test_attachment_list_empty(self):
"""
Tests the 'attachment list' command in trac-admin, on a wiki page that
doesn't have any attachments.
"""
# FIXME: Additional tests should be written for the other 'attachment'
# commands. This requires being able to control the current
# time, which in turn would require centralizing the time
# provider, for example in the environment object.
rv, output = self._execute('attachment list wiki:WikiStart')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_attachment_add_nonexistent_resource(self):
"""Tests the 'attachment add' command in trac-admin, on a non-existent
resource."""
rv, output = self._execute('attachment add wiki:NonExistentPage %s'
% __file__)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
# Config tests
def test_config_get(self):
"""
Tests the 'config get' command in trac-admin. This particular
test gets the project name from the config.
"""
self.env.config.set('project', 'name', 'Test project')
rv, output = self._execute('config get project name')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_config_set(self):
"""
Tests the 'config set' command in trac-admin. This particular
test sets the project name using an option value containing a space.
"""
rv, output = self._execute('config set project name "Test project"')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
self.assertEqual('Test project',
self.env.config.get('project', 'name'))
def test_config_remove(self):
"""
Tests the 'config remove' command in trac-admin. This particular
test removes the project name from the config, therefore reverting
the option to the default value.
"""
self.env.config.set('project', 'name', 'Test project')
rv, output = self._execute('config remove project name')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
self.assertEqual('My Project', self.env.config.get('project', 'name'))
# Permission tests
def test_permission_list_ok(self):
"""
Tests the 'permission list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_add_one_action_ok(self):
"""
Tests the 'permission add' command in trac-admin. This particular
test passes valid arguments to add one permission and checks for
success.
"""
self._execute('permission add test_user WIKI_VIEW')
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_add_multiple_actions_ok(self):
"""
Tests the 'permission add' command in trac-admin. This particular
test passes valid arguments to add multiple permissions and checks for
success.
"""
self._execute('permission add test_user LOG_VIEW FILE_VIEW')
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_add_already_exists(self):
"""
Tests the 'permission add' command in trac-admin. This particular
test passes a permission that already exists and checks for the
message. Other permissions passed are added.
"""
rv, output = self._execute('permission add anonymous WIKI_CREATE '
'WIKI_VIEW WIKI_MODIFY')
self.assertEqual(0, rv, output)
rv, output2 = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output + output2)
def test_permission_remove_one_action_ok(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test passes valid arguments to remove one permission and checks for
success.
"""
self._execute('permission remove anonymous TICKET_MODIFY')
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_remove_multiple_actions_ok(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test passes valid arguments to remove multiple permission and checks
for success.
"""
self._execute('permission remove anonymous WIKI_CREATE WIKI_MODIFY')
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_remove_all_actions_for_user(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test removes all permissions for anonymous.
"""
self._execute('permission remove anonymous *')
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_remove_action_for_all_users(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test removes the TICKET_CREATE permission from all users.
"""
self._execute('permission add anonymous TICKET_CREATE')
self._execute('permission remove * TICKET_CREATE')
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_remove_unknown_user(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test tries removing a permission from an unknown user.
"""
rv, output = self._execute('permission remove joe TICKET_VIEW')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_permission_remove_action_not_granted(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test tries removing TICKET_CREATE from user anonymous, who doesn't
have that permission.
"""
rv, output = self._execute('permission remove anonymous TICKET_CREATE')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_permission_remove_action_granted_through_meta_permission(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test tries removing WIKI_VIEW from a user. WIKI_VIEW has been granted
through user anonymous."""
self._execute('permission add joe TICKET_VIEW')
rv, output = self._execute('permission remove joe WIKI_VIEW')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_permission_export_ok(self):
"""
Tests the 'permission export' command in trac-admin. This particular
test exports the default permissions to stdout.
"""
rv, output = self._execute('permission export')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_permission_import_ok(self):
"""
Tests the 'permission import' command in trac-admin. This particular
test exports additional permissions, removes them and imports them back.
"""
user = u'test_user\u0250'
self._execute('permission add ' + user + ' WIKI_VIEW')
self._execute('permission add ' + user + ' TICKET_VIEW')
rv, output = self._execute('permission export')
self._execute('permission remove ' + user + ' *')
rv, output = self._execute('permission import', input=output)
self.assertEqual(0, rv, output)
self.assertEqual('', output)
rv, output = self._execute('permission list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
# Component tests
def test_component_list_ok(self):
"""
Tests the 'component list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('component list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_component_add_ok(self):
"""
Tests the 'component add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('component add new_component')
rv, output = self._execute('component list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_component_add_optional_owner_ok(self):
"""
Tests the 'component add' command in trac-admin with the optional
'owner' argument. This particular test passes valid arguments and
checks for success.
"""
self._execute('component add new_component new_user')
rv, output = self._execute('component list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_component_add_error_already_exists(self):
"""
Tests the 'component add' command in trac-admin. This particular
test passes a component name that already exists and checks for an
error message.
"""
rv, output = self._execute('component add component1 new_user')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_component_rename_ok(self):
"""
Tests the 'component rename' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('component rename component1 changed_name')
rv, output = self._execute('component list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_component_rename_error_bad_component(self):
"""
Tests the 'component rename' command in trac-admin. This particular
test tries to rename a component that does not exist.
"""
rv, output = self._execute('component rename bad_component changed_name')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_component_rename_error_bad_new_name(self):
"""
Tests the 'component rename' command in trac-admin. This particular
test tries to rename a component to a name that already exists.
"""
rv, output = self._execute('component rename component1 component2')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_component_chown_ok(self):
"""
Tests the 'component chown' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('component chown component2 changed_owner')
rv, output = self._execute('component list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_component_chown_error_bad_component(self):
"""
Tests the 'component chown' command in trac-admin. This particular
test tries to change the owner of a component that does not
exist.
"""
rv, output = self._execute('component chown bad_component changed_owner')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_component_remove_ok(self):
"""
Tests the 'component remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('component remove component1')
rv, output = self._execute('component list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_component_remove_error_bad_component(self):
"""
Tests the 'component remove' command in trac-admin. This particular
test tries to remove a component that does not exist.
"""
rv, output = self._execute('component remove bad_component')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
# Ticket-type tests
def test_ticket_type_list_ok(self):
"""
Tests the 'ticket_type list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_add_ok(self):
"""
Tests the 'ticket_type add' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('ticket_type add new_type')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_add_error_already_exists(self):
"""
Tests the 'ticket_type add' command in trac-admin. This particular
test passes a ticket type that already exists and checks for an error
message.
"""
rv, output = self._execute('ticket_type add defect')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_change_ok(self):
"""
Tests the 'ticket_type change' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('ticket_type change defect bug')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_change_error_bad_type(self):
"""
Tests the 'ticket_type change' command in trac-admin. This particular
        test tries to change a ticket type that does not exist.
"""
rv, output = self._execute('ticket_type change bad_type changed_type')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_change_error_bad_new_name(self):
"""
Tests the 'ticket_type change' command in trac-admin. This particular
test tries to change a ticket type to another type that already exists.
"""
rv, output = self._execute('ticket_type change defect task')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_remove_ok(self):
"""
Tests the 'ticket_type remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('ticket_type remove task')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_remove_error_bad_type(self):
"""
Tests the 'ticket_type remove' command in trac-admin. This particular
test tries to remove a ticket type that does not exist.
"""
rv, output = self._execute('ticket_type remove bad_type')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_order_down_ok(self):
"""
Tests the 'ticket_type order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('ticket_type order defect down')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_order_up_ok(self):
"""
Tests the 'ticket_type order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('ticket_type order enhancement up')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_ticket_type_order_error_bad_type(self):
"""
        Tests the 'ticket_type order' command in trac-admin. This particular
        test tries to reorder a ticket type that does not exist.
"""
rv, output = self._execute('ticket_type order bad_type up')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
# Priority tests
def test_priority_list_ok(self):
"""
Tests the 'priority list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_add_ok(self):
"""
Tests the 'priority add' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('priority add new_priority')
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_add_many_ok(self):
"""
Tests adding more than 10 priority values. This makes sure that
ordering is preserved when adding more than 10 values.
"""
for i in xrange(11):
self._execute('priority add p%s' % i)
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_add_error_already_exists(self):
"""
Tests the 'priority add' command in trac-admin. This particular
test passes a priority name that already exists and checks for an
error message.
"""
rv, output = self._execute('priority add blocker')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_priority_change_ok(self):
"""
Tests the 'priority change' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('priority change major normal')
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_change_error_bad_priority(self):
"""
Tests the 'priority change' command in trac-admin. This particular
test tries to change a priority that does not exist.
"""
rv, output = self._execute('priority change bad_priority changed_name')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_priority_change_error_bad_new_name(self):
"""
Tests the 'priority change' command in trac-admin. This particular
test tries to change a priority to a name that already exists.
"""
rv, output = self._execute('priority change major minor')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_priority_remove_ok(self):
"""
Tests the 'priority remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('priority remove major')
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_remove_error_bad_priority(self):
"""
Tests the 'priority remove' command in trac-admin. This particular
test tries to remove a priority that does not exist.
"""
rv, output = self._execute('priority remove bad_priority')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_priority_order_down_ok(self):
"""
Tests the 'priority order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('priority order blocker down')
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_order_up_ok(self):
"""
Tests the 'priority order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('priority order critical up')
rv, output = self._execute('priority list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_priority_order_error_bad_priority(self):
"""
Tests the 'priority order' command in trac-admin. This particular
test tries to reorder a priority that does not exist.
"""
        rv, output = self._execute('priority order bad_priority up')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
# Severity tests
def test_severity_list_ok(self):
"""
Tests the 'severity list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('severity list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_severity_add_ok(self):
"""
Tests the 'severity add' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('severity add new_severity')
rv, output = self._execute('severity list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_severity_add_error_already_exists(self):
"""
Tests the 'severity add' command in trac-admin. This particular
test passes a severity name that already exists and checks for an
error message.
"""
self._execute('severity add blocker')
rv, output = self._execute('severity add blocker')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_severity_change_ok(self):
"""
        Tests the 'severity change' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('severity add critical')
self._execute('severity change critical "end-of-the-world"')
rv, output = self._execute('severity list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_severity_change_error_bad_severity(self):
"""
Tests the 'severity change' command in trac-admin. This particular
test tries to change a severity that does not exist.
"""
rv, output = self._execute('severity change bad_severity changed_name')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_severity_change_error_bad_new_name(self):
"""
Tests the 'severity change' command in trac-admin. This particular
test tries to change a severity to a name that already exists.
"""
self._execute('severity add major')
self._execute('severity add critical')
rv, output = self._execute('severity change critical major')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_severity_remove_ok(self):
"""
        Tests the 'severity remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('severity remove trivial')
rv, output = self._execute('severity list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_severity_remove_error_bad_severity(self):
"""
Tests the 'severity remove' command in trac-admin. This particular
test tries to remove a severity that does not exist.
"""
rv, output = self._execute('severity remove bad_severity')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_severity_order_down_ok(self):
"""
Tests the 'severity order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('severity add foo')
self._execute('severity add bar')
self._execute('severity order foo down')
rv, output = self._execute('severity list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_severity_order_up_ok(self):
"""
Tests the 'severity order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('severity add foo')
self._execute('severity add bar')
self._execute('severity order bar up')
rv, output = self._execute('severity list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_severity_order_error_bad_severity(self):
"""
Tests the 'severity order' command in trac-admin. This particular
        test tries to reorder a severity that does not exist.
        """
        rv, output = self._execute('severity order bad_severity up')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
# Version tests
def test_version_list_ok(self):
"""
Tests the 'version list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_version_add_ok(self):
"""
Tests the 'version add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('version add 9.9 "%s"' % self._test_date)
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_version_add_error_already_exists(self):
"""
Tests the 'version add' command in trac-admin. This particular
test passes a version name that already exists and checks for an
error message.
"""
rv, output = self._execute('version add 1.0 "%s"' % self._test_date)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_version_rename_ok(self):
"""
Tests the 'version rename' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('version rename 1.0 9.9')
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_version_rename_error_bad_version(self):
"""
Tests the 'version rename' command in trac-admin. This particular
test tries to rename a version that does not exist.
"""
rv, output = self._execute('version rename bad_version changed_name')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_version_time_ok(self):
"""
Tests the 'version time' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('version time 2.0 "%s"' % self._test_date)
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_version_time_unset_ok(self):
"""
Tests the 'version time' command in trac-admin. This particular
test passes valid arguments for unsetting the date.
"""
self._execute('version time 2.0 "%s"' % self._test_date)
self._execute('version time 2.0 ""')
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_version_time_error_bad_version(self):
"""
Tests the 'version time' command in trac-admin. This particular
test tries to change the time on a version that does not exist.
"""
rv, output = self._execute('version time bad_version "%s"'
% self._test_date)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_version_remove_ok(self):
"""
Tests the 'version remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('version remove 1.0')
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_version_remove_error_bad_version(self):
"""
Tests the 'version remove' command in trac-admin. This particular
test tries to remove a version that does not exist.
"""
rv, output = self._execute('version remove bad_version')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
# Milestone tests
def test_milestone_list_ok(self):
"""
Tests the 'milestone list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_add_ok(self):
"""
Tests the 'milestone add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('milestone add new_milestone "%s"' % self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_add_utf8_ok(self):
"""
Tests the 'milestone add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute(u'milestone add \xa9tat_final "%s"' #\xc2\xa9
% self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_add_error_already_exists(self):
"""
Tests the 'milestone add' command in trac-admin. This particular
test passes a milestone name that already exists and checks for an
error message.
"""
rv, output = self._execute('milestone add milestone1 "%s"'
% self._test_date)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_milestone_add_invalid_date(self):
rv, output = self._execute('milestone add new_milestone <add>')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'hint': self._datetime_format_hint,
'isohint': get_datetime_format_hint('iso8601')
})
def test_milestone_rename_ok(self):
"""
Tests the 'milestone rename' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('milestone rename milestone1 changed_milestone')
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_rename_error_bad_milestone(self):
"""
Tests the 'milestone rename' command in trac-admin. This particular
test tries to rename a milestone that does not exist.
"""
rv, output = self._execute('milestone rename bad_milestone changed_name')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_milestone_due_ok(self):
"""
Tests the 'milestone due' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('milestone due milestone2 "%s"' % self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_due_unset_ok(self):
"""
Tests the 'milestone due' command in trac-admin. This particular
test passes valid arguments for unsetting the due date.
"""
self._execute('milestone due milestone2 "%s"' % self._test_date)
self._execute('milestone due milestone2 ""')
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_due_error_bad_milestone(self):
"""
Tests the 'milestone due' command in trac-admin. This particular
test tries to change the due date on a milestone that does not exist.
"""
rv, output = self._execute('milestone due bad_milestone "%s"'
% self._test_date)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_milestone_due_invalid_date(self):
rv, output = self._execute('milestone due milestone1 <due>')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'hint': self._datetime_format_hint,
'isohint': get_datetime_format_hint('iso8601')
})
def test_milestone_completed_ok(self):
"""
Tests the 'milestone completed' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
self._execute('milestone completed milestone2 "%s"' % self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_completed_error_bad_milestone(self):
"""
Tests the 'milestone completed' command in trac-admin. This particular
test tries to change the completed date on a milestone that does not
exist.
"""
rv, output = self._execute('milestone completed bad_milestone "%s"'
% self._test_date)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_milestone_completed_invalid_date(self):
rv, output = self._execute('milestone completed milestone1 <com>')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'hint': self._datetime_format_hint,
'isohint': get_datetime_format_hint('iso8601')
})
def test_milestone_remove_ok(self):
"""
Tests the 'milestone remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
self._execute('milestone remove milestone3')
rv, output = self._execute('milestone list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_milestone_remove_error_bad_milestone(self):
"""
Tests the 'milestone remove' command in trac-admin. This particular
test tries to remove a milestone that does not exist.
"""
rv, output = self._execute('milestone remove bad_milestone')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_backslash_use_ok(self):
if self._admin.interactive:
self._execute('version add \\')
else:
self._execute(r"version add '\'")
rv, output = self._execute('version list')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_no_sessions(self):
rv, output = self._execute('session list authenticated')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_authenticated(self):
_prep_session_table(self.env)
rv, output = self._execute('session list authenticated')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_anonymous(self):
_prep_session_table(self.env)
rv, output = self._execute('session list anonymous')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_all(self):
_prep_session_table(self.env)
if self._admin.interactive:
rv, output = self._execute("session list *")
else:
rv, output = self._execute("session list '*'")
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_authenticated_sid(self):
_prep_session_table(self.env)
rv, output = self._execute('session list name00')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_anonymous_sid(self):
_prep_session_table(self.env)
rv, output = self._execute('session list name10:0')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_list_missing_sid(self):
_prep_session_table(self.env)
rv, output = self._execute('session list thisdoesntexist')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_add_missing_sid(self):
rv, output = self._execute('session add')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_session_add_duplicate_sid(self):
_prep_session_table(self.env)
rv, output = self._execute('session add name00')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_session_add_sid_all(self):
rv, output = self._execute('session add john John john@example.org')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list john')
self.assertExpectedResult(output, {
'today': format_date(None, console_date_format)
})
def test_session_add_sid(self):
rv, output = self._execute('session add john')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list john')
self.assertExpectedResult(output, {
'today': format_date(None, console_date_format)
})
def test_session_add_sid_name(self):
rv, output = self._execute('session add john John')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list john')
self.assertExpectedResult(output, {
'today': format_date(None, console_date_format)
})
def test_session_set_attr_name(self):
_prep_session_table(self.env)
rv, output = self._execute('session set name name00 JOHN')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list name00')
self.assertExpectedResult(output)
def test_session_set_attr_email(self):
_prep_session_table(self.env)
rv, output = self._execute('session set email name00 JOHN@EXAMPLE.ORG')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list name00')
self.assertExpectedResult(output)
def test_session_set_attr_missing_attr(self):
rv, output = self._execute('session set')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_session_set_attr_missing_value(self):
rv, output = self._execute('session set name john')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_session_set_attr_missing_sid(self):
rv, output = self._execute('session set name')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_session_set_attr_nonexistent_sid(self):
rv, output = self._execute('session set name john foo')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_session_delete_sid(self):
_prep_session_table(self.env)
rv, output = self._execute('session delete name00')
self.assertEqual(0, rv, output)
        rv, output = self._execute('session list name00')
self.assertExpectedResult(output)
def test_session_delete_missing_params(self):
rv, output = self._execute('session delete')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_session_delete_anonymous(self):
_prep_session_table(self.env)
rv, output = self._execute('session delete anonymous')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list *')
self.assertExpectedResult(output)
def test_session_delete_multiple_sids(self):
_prep_session_table(self.env)
rv, output = self._execute('session delete name00 name01 name02 '
'name03')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list *')
self.assertExpectedResult(output)
def test_session_purge_age(self):
_prep_session_table(self.env, spread_visits=True)
rv, output = self._execute('session purge 20100112')
self.assertEqual(0, rv, output)
rv, output = self._execute('session list *')
self.assertExpectedResult(output)
def test_session_purge_invalid_date(self):
rv, output = self._execute('session purge <purge>')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output, {
'hint': self._datetime_format_hint,
'isohint': get_datetime_format_hint('iso8601')
})
def test_help_milestone_due(self):
doc = self._get_command_help('milestone', 'due')
self.assertIn(self._datetime_format_hint, doc)
self.assertIn(u'"YYYY-MM-DDThh:mm:ss±hh:mm"', doc)
def test_help_milestone_completed(self):
doc = self._get_command_help('milestone', 'completed')
self.assertIn(self._datetime_format_hint, doc)
self.assertIn(u'"YYYY-MM-DDThh:mm:ss±hh:mm"', doc)
def test_help_version_time(self):
doc = self._get_command_help('version', 'time')
self.assertIn(self._datetime_format_hint, doc)
self.assertIn(u'"YYYY-MM-DDThh:mm:ss±hh:mm"', doc)
def test_help_session_purge(self):
doc = self._get_command_help('session', 'purge')
self.assertIn(u'"YYYY-MM-DDThh:mm:ss±hh:mm"', doc)
class TracadminNoEnvTestCase(unittest.TestCase):
def setUp(self):
self._admin = TracAdmin()
def tearDown(self):
self._admin = None
def _execute(self, cmd, strip_trailing_space=True, input=None):
return execute_cmd(self._admin, cmd,
strip_trailing_space=strip_trailing_space,
input=input)
def test_help(self):
rv, output = self._execute('help')
output = output.splitlines()
self.assertEqual('', output[-3])
self.assertEqual('help Show documentation', output[-2])
self.assertEqual('initenv Create and initialize a new environment',
output[-1])
def test_help_with_nocmd(self):
rv, output = self._execute('help nocmd')
output = output.splitlines()
self.assertEqual(["No documentation found for 'nocmd'. Use 'help' to "
"see the list of commands."],
output)
class TracAdminHelpMacroTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=['%s.UnicodeHelpCommand' %
self.__module__])
def tearDown(self):
self.env.reset_db()
def test_unicode_help(self):
unicode_help = u'Hélp text with unicöde charàcters'
class UnicodeHelpCommand(Component):
implements(IAdminCommandProvider)
def get_admin_commands(self):
yield ('unicode-help', '', unicode_help,
None, self._cmd)
def _cmd(self):
pass
macro = TracAdminHelpMacro(self.env)
help = unicode(macro.expand_macro(None, None, 'unicode-help'))
self.assertTrue(unicode_help in help)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TracadminTestCase))
suite.addTest(unittest.makeSuite(TracadminNoEnvTestCase))
suite.addTest(unittest.makeSuite(TracAdminHelpMacroTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
largelymfs/MTMSWord2Vec | refs/heads/master | adapter.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: largelyfs
# @Date: 2015-02-24 10:58:11
# @Last Modified by: largelyfs
# @Last Modified time: 2015-02-27 16:34:17
# Convert an MS-word2vec model file to the standard word2vec text format.
def convert(input, output):
with open(input) as fin, open(output,"w") as fout:
print >>fout, fin.readline().strip()
while True:
l1 = fin.readline()
if not l1:
break
l2 = fin.readline()
print >>fout, l1.strip().split()[0],
print >>fout, l2.strip()
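# Input layout assumed by convert(), as read by the loop above: a header line,
# then two lines per word -- the first begins with the word token (everything
# after it is dropped), the second holds the vector values. The output is one
# "word v1 v2 ... vn" line per word, i.e. the standard word2vec text format.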
if __name__=="__main__":
convert("./output.txt","vectors.bin") |
utecuy/edx-platform | refs/heads/master | lms/djangoapps/support/tests/test_refund.py | 52 | """
Tests for refunds on the support dashboard
DEPRECATION WARNING:
This test suite is deliberately separate from the other view tests
so we can easily deprecate it once the transition from shoppingcart
to the E-Commerce service is complete.
"""
import datetime
from django.test.client import Client
from course_modes.models import CourseMode
from shoppingcart.models import CertificateItem, Order
from student.models import CourseEnrollment
from student.roles import SupportStaffRole
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class RefundTests(ModuleStoreTestCase):
"""
Tests for the manual refund page
"""
def setUp(self):
super(RefundTests, self).setUp()
self.course = CourseFactory.create(
org='testorg', number='run1', display_name='refundable course'
)
self.course_id = self.course.location.course_key
self.client = Client()
self.admin = UserFactory.create(
username='test_admin',
email='test_admin+support@edx.org',
password='foo'
)
SupportStaffRole().add_users(self.admin)
self.client.login(username=self.admin.username, password='foo')
self.student = UserFactory.create(
username='student',
email='student+refund@edx.org'
)
self.course_mode = CourseMode.objects.get_or_create(course_id=self.course_id, mode_slug='verified')[0]
self.order = None
self.form_pars = {'course_id': str(self.course_id), 'user': self.student.email}
def tearDown(self):
self.course_mode.delete()
Order.objects.filter(user=self.student).delete()
super(RefundTests, self).tearDown()
def _enroll(self, purchase=True):
# pylint: disable=missing-docstring
CourseEnrollment.enroll(self.student, self.course_id, self.course_mode.mode_slug)
if purchase:
self.order = Order.get_cart_for_user(self.student)
CertificateItem.add_to_order(self.order, self.course_id, 1, self.course_mode.mode_slug)
self.order.purchase()
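        # Backdate the mode's expiration so the refund views treat the order
        # as refundable; test_not_refundable moves it into the future to
        # exercise the opposite case.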
self.course_mode.expiration_datetime = datetime.datetime(1983, 4, 6)
self.course_mode.save()
def test_support_access(self):
response = self.client.get('/support/')
        self.assertEqual(200, response.status_code)
self.assertContains(response, 'Manual Refund')
response = self.client.get('/support/refund/')
        self.assertEqual(200, response.status_code)
# users without the permission can't access support
SupportStaffRole().remove_users(self.admin)
response = self.client.get('/support/')
        self.assertEqual(302, response.status_code)
response = self.client.get('/support/refund/')
        self.assertEqual(302, response.status_code)
def test_bad_courseid(self):
response = self.client.post('/support/refund/', {'course_id': 'foo', 'user': self.student.email})
self.assertContains(response, 'Invalid course id')
def test_bad_user(self):
response = self.client.post('/support/refund/', {'course_id': str(self.course_id), 'user': 'unknown@foo.com'})
self.assertContains(response, 'User not found')
def test_not_refundable(self):
self._enroll()
self.course_mode.expiration_datetime = datetime.datetime(2033, 4, 6)
self.course_mode.save()
response = self.client.post('/support/refund/', self.form_pars)
self.assertContains(response, 'not past the refund window')
def test_no_order(self):
self._enroll(purchase=False)
response = self.client.post('/support/refund/', self.form_pars)
self.assertContains(response, 'No order found for %s' % self.student.username)
def test_valid_order(self):
self._enroll()
response = self.client.post('/support/refund/', self.form_pars)
self.assertContains(response, "About to refund this order")
self.assertContains(response, "enrolled")
self.assertContains(response, "CertificateItem Status")
def test_do_refund(self):
self._enroll()
pars = self.form_pars
pars['confirmed'] = 'true'
response = self.client.post('/support/refund/', pars)
        self.assertEqual(302, response.status_code)
response = self.client.get(response.get('location')) # pylint: disable=maybe-no-member
self.assertContains(response, "Unenrolled %s from" % self.student)
self.assertContains(response, "Refunded 1 for order id")
self.assertFalse(CourseEnrollment.is_enrolled(self.student, self.course_id))
|
GeodesicCarbon/protopaja-sick | refs/heads/master | src/rosserial_embeddedlinux/src/examples/ExampleServiceClient/exampleService.py | 61 | #!/usr/bin/env python
"""
Sample code to use with ServiceClient.pde
"""
import roslib; roslib.load_manifest("rosserial_arduino")
import rospy
from rosserial_arduino.srv import *
def callback(req):
print "The arduino is calling! Please send it a message:"
t = TestResponse()
t.output = raw_input()
return t
rospy.init_node("service_client_test")
rospy.Service("test_srv", Test, callback)
rospy.spin()
|
hughsie/PackageKit | refs/heads/master | contrib/fix_translations.py | 5 | #!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1+
import sys
import os
import subprocess
def _do_msgattrib(fn):
argv = [
'msgattrib',
'--no-location',
'--translated',
'--no-wrap',
'--sort-output',
fn,
'--output-file=' + fn,
]
ret = subprocess.run(argv)
if ret.returncode != 0:
return
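# For reference, _do_msgattrib() above is equivalent to running, e.g.:
#   msgattrib --no-location --translated --no-wrap --sort-output \
#       de.po --output-file=de.po
# ('de.po' is an illustrative file name.)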
def _do_nukeheader(fn):
clean_lines = []
with open(fn) as f:
lines = f.readlines()
for line in lines:
if line.startswith('"POT-Creation-Date:'):
continue
if line.startswith('"PO-Revision-Date:'):
continue
if line.startswith('"Last-Translator:'):
continue
clean_lines.append(line)
with open(fn, 'w') as f:
f.writelines(clean_lines)
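# _do_nukeheader() drops volatile PO header fields; sample lines it removes
# (values invented for illustration):
#   "POT-Creation-Date: 2019-01-01 00:00+0000\n"
#   "PO-Revision-Date: 2019-01-02 00:00+0000\n"
#   "Last-Translator: A Translator <translator@example.com>\n"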
def _process_file(fn):
_do_msgattrib(fn)
_do_nukeheader(fn)
if __name__ == '__main__':
if len(sys.argv) == 1:
print('path required')
sys.exit(1)
try:
dirname = sys.argv[1]
for fn in os.listdir(dirname):
if fn.endswith('.po'):
_process_file(os.path.join(dirname, fn))
except NotADirectoryError as _:
print('path required')
sys.exit(2)
|
rdegraaf/dyphal | refs/heads/master | tools/DyphalGenerator.py | 1 | #!/usr/bin/python3
"""Server-side data generator for Dyphal, the Dynamic Photo Album.
Copyright (c) Rennie deGraaf, 2005-2017.
DyphalGenerator is a tool to create photo albums to display using
Dyphal. It can import metadata from a variety of embedded photo tags
(EXIF, IPTC, etc.) and it understands catalog files created by gThumb
3.x. Hopefully the UI is self-explanatory, because there isn't any
detailed usage documentation at this time.
DyphalGenerator requires Python 3.3 or later, only runs on Linux, and
requires that the commands 'convert' from the ImageMagick package and
'exiftool' are available in the current path.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) version 3.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Rennie deGraaf <rennie.degraaf@gmail.com>"
__version__ = "VERSION"
__date__ = "DATE"
#__all__ = "" # Uncomment to limit the amount of data that pydoc spews out.
import sys
import os
import os.path
import xml.etree.ElementTree
import concurrent.futures
import subprocess
import json
import tempfile
import traceback
import functools
import shutil
import urllib.parse
from PyQt4 import QtCore
from PyQt4 import QtGui
from dyphal.ui import Ui_MainWindow
from dyphal.about import Ui_AboutDialog
from dyphal.util import DirectoryHandleList, handle_exceptions, ensure_directory
from dyphal.photo import PhotoFile
from dyphal.album import Album, ParseError, SaveError
# These variables may be re-written by the installation script
DATA_PATH = os.path.expanduser("~/.share/dyphal/")
CONFIG_PATH = os.path.expanduser("~/.config/")
CONFIG_NAME = "DyphalGenerator.conf"
class Config(object):
"""Run-time configuration.
Attributes:
photoDir (str): The name of the directory from which photos
were last imported.
gthumb3Dir (str): The name of the directory from which a gThumb
3 catalog was last imported.
outputDir (str): The name of the directory where an album was
last created.
photoQuality (int): The quality percentage for resized photos.
maxWorkers (int): The maximum number of background threads to
use.
dimensions ((int, int)): The current window dimensions.
uiData (dict): Contents of certain UI fields that were saved
from the last session.
tempDir (tempfile.TemporaryDirectory): A secure temporary
directory to hold links to photos and generated files.
_file (file): A handle to the configuration file.
_umask (int): Saved umask.
"""
PROGRAM_NAME = "Dyphal Generator"
THUMB_WIDTH = 160
THUMB_HEIGHT = 120
THUMB_QUALITY = 50
BG_TIMEOUT = 5
TEMPLATE_FILE_NAMES = ["album.css", "back.png", "common.css", "debug.css", "dyphal.js",
"help.png", "index.html", "javascript.html", "next.png",
"photo.css", "placeholder.png", "prev.png", "README.html"]
DEFAULT_PHOTO_DIR = os.path.expanduser("~")
DEFAULT_GTHUMB3_DIR = os.path.expanduser("~/.local/share/gthumb/catalogs")
DEFAULT_GTHUMB2_DIR = os.path.expanduser("~/.gnome2/gthumb/collections")
DEFAULT_OUTPUT_DIR = os.path.expanduser("~")
DEFAULT_PHOTO_QUALITY = 75
DEFAULT_THREADS = 8
METADATA_DIR = "metadata"
PHOTO_DIR = "photos"
THUMBNAIL_DIR = "thumbnails"
def __init__(self):
"""Set up run-time configuration. Load the configuration file
and set up shared resources. Populate any run-time properties
not found in the file with sane defaults."""
# Load the configuration file. Keep the handle so that we can save to the same file.
self._file = None
data = {}
self._umask = os.umask(0o22)
try:
ensure_directory(CONFIG_PATH)
# Python's 'r+' mode doesn't create files if they don't already exist.
self._file = open(os.path.join(CONFIG_PATH, CONFIG_NAME), "r+",
opener=lambda path, flags: os.open(path, flags|os.O_CREAT, 0o666))
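            # The custom opener adds O_CREAT so the file is created when
            # missing, which plain "r+" mode cannot do; the 0o666 mode is
            # filtered by the umask set above.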
data = json.load(self._file)
except (FileNotFoundError, ValueError):
# open() can fail with FileNotFoundError if a directory in the path doesn't exist.
# json.load() can fail with ValueError if the file is empty or otherwise invalid.
pass
except Exception:
# We'll just ignore any other failures and continue without a configuration file.
(exc_type, exc_value, exc_traceback) = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
# Used during operation and stored in the configuration file
self.photoDir = data["photoDir"] if "photoDir" in data else self.DEFAULT_PHOTO_DIR
self.gthumb3Dir = data["gthumb3Dir"] if "gthumb3Dir" in data else self.DEFAULT_GTHUMB3_DIR
#self.gthumb2Dir = data["gthumb2Dir"] if "gthumb2Dir" in data else self.DEFAULT_GTHUMB2_DIR
self.outputDir = data["outputDir"] if "outputDir" in data else self.DEFAULT_OUTPUT_DIR
self.photoQuality = self.DEFAULT_PHOTO_QUALITY
if "photoQuality" in data and 0 < data["photoQuality"] and 100 >= data["photoQuality"]:
self.photoQuality = data["photoQuality"]
# Used only at startup and stored in the configuration file
ideal_thread_count = QtCore.QThread.idealThreadCount()
if 0 < ideal_thread_count:
# Some tasks are I/O-bound and some are CPU-bound, so let's go with
# twice the number of CPU cores.
self.maxWorkers = 2 * ideal_thread_count
else:
self.maxWorkers = self.DEFAULT_THREADS
if "threads" in data and 0 < data["threads"] and 50 >= data["threads"]:
self.maxWorkers = data["threads"]
self.dimensions = data["dimensions"] if "dimensions" in data else None
self.uiData = data["uiData"] if "uiData" in data else None
# Not stored in the configuration file
self.tempDir = tempfile.TemporaryDirectory()
def save(self):
"""Save the current state to the configuration file."""
# If we couldn't open or create the config file, don't bother saving.
if None is not self._file:
data = {}
data["photoDir"] = self.photoDir
data["gthumb3Dir"] = self.gthumb3Dir
data["outputDir"] = self.outputDir
data["photoQuality"] = self.photoQuality
data["threads"] = self.maxWorkers
data["dimensions"] = self.dimensions
data["uiData"] = self.uiData
self._file.seek(0)
self._file.truncate(0)
json.dump(data, self._file, sort_keys=True)
self._file.flush()
def close(self):
"""Close the configuration file and tear down shared resources."""
self.tempDir.cleanup()
self.tempDir = None
self._file.close()
self._file = None
os.umask(self._umask)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
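# Usage sketch (hypothetical): Config implements the context-manager protocol
# so the config file and temporary directory are torn down on exit, e.g.
#   with Config() as config:
#       print(config.photoQuality)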
class ListKeyFilter(QtCore.QObject):
"""QT filter to handle certain keypress events."""
delKeyPressed = QtCore.pyqtSignal() # 'del' key pressed.
escKeyPressed = QtCore.pyqtSignal() # 'esc' key pressed.
def eventFilter(self, obj, event):
"""Handle 'del' and 'esc' keypress events."""
if QtCore.QEvent.KeyPress == event.type():
if QtCore.Qt.Key_Delete == event.key():
self.delKeyPressed.emit()
return True
elif QtCore.Qt.Key_Escape == event.key():
self.escKeyPressed.emit()
return True
return False
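# Typical wiring, as done in DyphalUI.__init__ below: install the filter on a
# widget and connect its signals, e.g.
#   filt = ListKeyFilter()
#   widget.installEventFilter(filt)
#   filt.delKeyPressed.connect(on_delete)   # 'on_delete' is illustrative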
class DyphalUI(QtGui.QMainWindow, Ui_MainWindow):
"""The Dyphal Generator UI.
Attributes (not including UI objects):
_config (Config): The run-time configuration object.
_threads (concurrent.futures.Executor): The thread pool for
background tasks.
_backgroundCount (int): The number of background activities
(*not* tasks) that are pending.
_backgroundTasks (list of concurrent.futures.Future): Pending
background tasks.
_currentAlbumFileName (str): The name of the current album file.
_dirty (bool): True if the album data has changed since the
last save; false otherwise.
"""
FILTER_IMAGES = "Images (*.jpeg *.jpg *.png *.tiff *.tif)"
FILTER_GTHUMB3_CATALOGS = "gThumb catalogs (*.catalog)"
FILTER_ALBUMS = "Albums (*.dyphal);;JSON Albums (*.json);;All (*.*)"
_addPhotoSignal = QtCore.pyqtSignal(PhotoFile, bool) # A photo is ready to be added to the UI.
_showErrorSignal = QtCore.pyqtSignal(str) # An error message needs to be displayed.
_incProgressSignal = QtCore.pyqtSignal() # A background processing step has completed.
_backgroundCompleteSignal = QtCore.pyqtSignal(bool) # Background processing has completed.
_renamePhotosSignal = QtCore.pyqtSignal(list) # Photos need to be renamed due to collisions.
_setAlbumDataSignal = QtCore.pyqtSignal(str, dict) # An album has been loaded.
_closeSignal = QtCore.pyqtSignal() # Program exit was requested from a background thread.
_dirtySignal = QtCore.pyqtSignal(bool) # A background thread dirtied or undirtied the album.
def __init__(self, config):
"""Initialize a DyphalUI. Hooks up event handlers and
performs other UI initialization that the generated code from
Designer can't do."""
super().__init__()
self._config = config
self._threads = concurrent.futures.ThreadPoolExecutor(self._config.maxWorkers)
self._backgroundCount = 0
self._backgroundTasks = None
self._currentAlbumFileName = None
self.setupUi(self)
if None is not self._config.dimensions:
self.resize(*self._config.dimensions)
if None is not self._config.uiData:
self._restoreUIData(self._config.uiData)
self.progressBar.setVisible(False)
self.cancelButton.setVisible(False)
self.generateAlbumButton.setVisible(False)
self._dirty = False
# Set the sizes of the photo list and properties within the splitter.
self.splitter.setStretchFactor(0, 5)
self.splitter.setStretchFactor(1, 1)
self.splitter.setCollapsible(0, False)
# Set up the menu for the "Add Photos" button
self._addPhotosButtonMenu = QtGui.QMenu(self.addPhotosButton)
self._addPhotosFiles = QtGui.QAction("Add Files...", self._addPhotosButtonMenu)
self._addPhotosGthumb3 = QtGui.QAction("Add a gThumb 3 Catalog...",
self._addPhotosButtonMenu)
self._addPhotosButtonMenu.addAction(self._addPhotosFiles)
self._addPhotosButtonMenu.addAction(self._addPhotosGthumb3)
self.addPhotosButton.setMenu(self._addPhotosButtonMenu)
# Set up the menu for the "Add Caption" button
self._addCaptionButtonMenu = QtGui.QMenu(self.addCaptionButton)
self.addCaptionButton.setMenu(self._addCaptionButtonMenu)
# Set up the menu for the "Add Property" button
self._addPropertyButtonMenu = QtGui.QMenu(self.addPropertyButton)
self.addPropertyButton.setMenu(self._addPropertyButtonMenu)
# Listen for keyboard events in photosList, captionsList, and propertiesList
self._photosListFilter = ListKeyFilter()
self.photosList.installEventFilter(self._photosListFilter)
self._captionsListFilter = ListKeyFilter()
self.captionsList.installEventFilter(self._captionsListFilter)
self._propertiesListFilter = ListKeyFilter()
self.propertiesList.installEventFilter(self._propertiesListFilter)
# Event handlers
self._addPhotosFiles.triggered.connect(self._addPhotosHandler)
self._addPhotosGthumb3.triggered.connect(self._addPhotosHandler)
self.removePhotosButton.clicked.connect(self._removePhotosHandler)
self.photosList.itemSelectionChanged.connect(self._showProperties)
self.photosList.itemActivated.connect(self._showPhoto)
self._addPhotoSignal.connect(self._addPhoto)
self._showErrorSignal.connect(self._showError)
self._incProgressSignal.connect(self._incProgress)
self._backgroundCompleteSignal.connect(self._backgroundComplete)
self.showAllCaptionsFlag.stateChanged.connect(self._updatePhotoCaptions)
self.showAllPropertiesFlag.stateChanged.connect(self._updatePhotoProperties)
self.removeCaptionsButton.clicked.connect(self._removeCaptionsHandler)
self.removePropertiesButton.clicked.connect(self._removePropertiesHandler)
self.generateAlbumButton.clicked.connect(self._generateAlbum)
self.newAlbumButton.clicked.connect(self._newAlbum)
self.openAlbumButton.clicked.connect(self._openAlbum)
self.installTemplateButton.clicked.connect(self._installTemplate)
self.cancelButton.clicked.connect(self._cancelBackgroundTasks)
self.aboutButton.clicked.connect(self._about)
self._photosListFilter.delKeyPressed.connect(self._removePhotosHandler)
self._photosListFilter.escKeyPressed.connect(self.photosList.clearSelection)
self._captionsListFilter.delKeyPressed.connect(self._removeCaptionsHandler)
self._captionsListFilter.escKeyPressed.connect(self.captionsList.clearSelection)
self._propertiesListFilter.delKeyPressed.connect(self._removePropertiesHandler)
self._propertiesListFilter.escKeyPressed.connect(self.propertiesList.clearSelection)
self._renamePhotosSignal.connect(self._renamePhotos)
self._setAlbumDataSignal.connect(self._setAlbumData)
self._closeSignal.connect(self.close)
self.photoSizeButton.currentIndexChanged.connect(lambda: self._setDirty())
self.titleText.textChanged.connect(self._setDirty)
self.footerText.textChanged.connect(self._setDirty)
self.descriptionText.textChanged.connect(self._setDirty)
self._dirtySignal.connect(self._setDirty)
def _setDirty(self, dirty=True):
"""Marks the current album as having changed."""
self._dirty = dirty
def _bgExit(self, pending_tasks):
"""Background task to trigger program exit."""
if None is not pending_tasks:
concurrent.futures.wait(pending_tasks)
self._backgroundCompleteSignal.emit(True)
# self.close() can't be called from a background thread.
self._closeSignal.emit()
def closeEvent(self, event):
"""Main window close event handler. Shutdown the thread pool
and save the run-time configuration."""
# Prompt if the album is dirty.
if self._dirty and 0 < self.photosList.count() \
and QtGui.QMessageBox.No == QtGui.QMessageBox.warning(self, "Exit",
"The current album has not been saved. Realy exit?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No):
event.ignore()
return
# Prompt if there are background operations in progress.
if 0 != self._backgroundCount:
prompt_dialog = QtGui.QMessageBox(self)
prompt_dialog.setWindowTitle("Exit")
prompt_dialog.setIcon(QtGui.QMessageBox.Warning)
prompt_dialog.setText("There is an operation in progress. Wait for it to complete, " \
"or exit anyway?")
wait_button = prompt_dialog.addButton("Wait", QtGui.QMessageBox.ApplyRole)
prompt_dialog.addButton("Exit", QtGui.QMessageBox.DestructiveRole)
prompt_dialog.setDefaultButton(wait_button)
prompt_dialog.setEscapeButton(wait_button)
prompt_dialog.exec_()
if wait_button is prompt_dialog.clickedButton():
# Disable UI controls, except the Cancel button.
for child in self.findChildren(QtGui.QWidget):
if child is not self.cancelButton and child is not self.progressBar \
and None is child.findChild(QtGui.QPushButton, "cancelButton"):
child.setEnabled(False)
# Post a background task to exit after everything else completes.
# Don't register the task so that it cannot be cancelled.
self._backgroundInit(0)
self._threads.submit(self._bgExit, self._backgroundTasks)
self._backgroundStart([])
event.ignore()
return
else:
self._cancelBackgroundTasks()
self._threads.shutdown()
self._config.dimensions = (self.size().width(), self.size().height())
self._config.uiData = self._saveUIData()
self._config.save()
event.accept()
def _saveUIData(self):
"""Retrieve UI data fields that are likely to remain the same
between albums."""
data = {}
# I deliberately don't save title, caption or photos because they're likely to change
# between albums. These fields are much more likely to stay the same.
data["photoResolution"] = \
tuple(int(s) for s in self.photoSizeButton.currentText().split("x"))
data["footer"] = self.footerText.toPlainText()
data["captionFields"] = \
[self.captionsList.item(i).text() for i in range(0, self.captionsList.count())]
data["propertyFields"] = \
[self.propertiesList.item(i).text() for i in range(0, self.propertiesList.count())]
return data
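    # Illustrative shape of the dictionary produced above (values invented):
    #   {"photoResolution": (800, 600), "footer": "My footer",
    #    "captionFields": ["Comment"], "propertyFields": ["Date"]}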
def _restoreUIData(self, ui_data, require_fields=False):
"""Restore UI data fields that are likely to remain the same
between albums."""
if require_fields or "photoResolution" in ui_data:
resolution = "x".join([str(s) for s in ui_data["photoResolution"]])
for i in range(0, self.photoSizeButton.count()):
if resolution == self.photoSizeButton.itemText(i):
self.photoSizeButton.setCurrentIndex(i)
break
if require_fields or "footer" in ui_data:
self.footerText.setPlainText(ui_data["footer"])
if require_fields or "captionFields" in ui_data:
self.captionsList.clear()
for prop in ui_data["captionFields"]:
self.captionsList.addItem(prop)
if require_fields or "propertyFields" in ui_data:
self.propertiesList.clear()
for prop in ui_data["propertyFields"]:
self.propertiesList.addItem(prop)
def _addPhotosHandler(self):
"""Prompt the user for photos to add to the album, then load
them."""
sender = self.sender()
if self._addPhotosFiles is sender:
# Browse for photos
file_names = QtGui.QFileDialog.getOpenFileNames(self, "Select photos",
self._config.photoDir,
self.FILTER_IMAGES)
self._addPhotoFiles([(name, os.path.basename(name)) for name in file_names])
if 0 < len(file_names):
self._config.photoDir = os.path.dirname(file_names[len(file_names)-1])
elif self._addPhotosGthumb3 is sender:
# Add a gThumb 3 catalog
catalog_file_name = QtGui.QFileDialog.getOpenFileName(self, "Select catalog",
self._config.gthumb3Dir,
self.FILTER_GTHUMB3_CATALOGS)
# The QT documentation says that getOpenFileName returns a null string on cancel. But
# it returns an empty string here. Maybe that's a PyQt bug?
if "" != catalog_file_name:
tree = xml.etree.ElementTree.parse(catalog_file_name)
# Files appear in arbitrary order in a gThumb 3 catalog file.
# I assume that the display order is the names sorted alphabetically.
if "1.0" == tree.getroot().get("version"):
filenames = sorted(
[QtCore.QUrl(urllib.parse.unquote(elmt.attrib["uri"])).toLocalFile()
for elmt in tree.getroot().iter("file")])
self._addPhotoFiles([(name, os.path.basename(name)) for name in filenames])
self._config.gthumb3Dir = os.path.dirname(catalog_file_name)
else:
QtGui.QMessageBox.warning(self, Config.PROGRAM_NAME,
"Unsupported gThumb catalog version",
QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
else:
print("ERROR: unknown item selected in 'Add Photos' control")
def _addPhotoFiles(self, filenames, dirtying=True):
"""Start background tasks to load a list of photos."""
if 0 < len(filenames):
self._backgroundInit(len(filenames))
tasks = []
task = None
for (path, name) in filenames:
task = self._threads.submit(self._bgAddPhoto, path, name, task, dirtying)
task.photoName = path
tasks.append(task)
task = self._threads.submit(functools.partial(handle_exceptions,
self._bgAddPhotoComplete), tasks)
self._backgroundStart(tasks+[task])
def _removePhotosHandler(self):
"""Remove the currently selected photos from the album."""
items = self.photosList.selectedItems()
if 0 < len(items):
            # Clear the selection so that I don't need to update the selection and make callbacks
# with every deletion, which takes a while.
self.photosList.clearSelection()
# I need to remove the photo from the list on foreground thread, because the list is
# owned by the GUI. I need to close the Photo object on a background thread, because
# that's I/O.
self._backgroundInit(len(items))
tasks = []
task = None
for item in items:
photo = self.photosList.takeItem(self.photosList.indexFromItem(item).row())
task = self._threads.submit(self._bgRemovePhoto, photo)
tasks.append(task)
task = self._threads.submit(functools.partial(handle_exceptions,
self._bgRemovePhotosComplete), tasks)
self._backgroundStart(tasks+[task])
if 0 == self.photosList.count():
self.generateAlbumButton.setVisible(False)
self._dirty = True
def _addPhoto(self, photo, dirtying):
"""Add a photo that has been loaded to the album."""
self.photosList.addItem(photo)
self.generateAlbumButton.setVisible(True)
if dirtying:
self._dirty = True
def _showProperties(self):
"""Display the properties of the most recently selected photo."""
self.photoProperties.clear()
# When the user deselects everything, currentRow and currentItem remain the last selected
# item. But selectedItems() is empty.
if 0 != len(self.photosList.selectedItems()) and None is not self.photosList.currentItem():
photo = self.photosList.currentItem()
line_break = ""
for obj in [photo.captions, photo.properties]:
for prop in sorted(obj.keys()):
self.photoProperties.insertHtml("%s<strong>%s</strong>: %s" %
(line_break, prop, obj[prop]))
if "" == line_break:
line_break = "<br>"
def _showPhoto(self, photo):
"""Display a photo using the system's image viewer."""
QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(photo.getPath()))
def _showError(self, err):
"""Show an error message."""
QtGui.QMessageBox.warning(self, Config.PROGRAM_NAME, err, QtGui.QMessageBox.Ok,
QtGui.QMessageBox.Ok)
def _incProgress(self):
"""Increment the progress bar counter."""
self.progressBar.setValue(self.progressBar.value() + 1)
def _backgroundInit(self, steps):
"""Initialize the progress bar for a background action. This
must occur before any background tasks can run."""
self._backgroundCount += 1
if 1 == self._backgroundCount:
self._backgroundTasks = []
self.generateAlbumButton.setVisible(False)
self.progressBar.setMaximum(steps)
self.progressBar.setValue(0)
else:
self.progressBar.setMaximum(self.progressBar.maximum() + steps)
def _backgroundStart(self, tasks):
"""Show the cancellation UI. Don't do this until after the
background tasks are registered so that there's something to
cancel."""
self._backgroundTasks.extend(tasks)
self.progressBar.setVisible(True)
self.cancelButton.setVisible(True)
def _backgroundComplete(self, force):
"""Dismiss the cancellation UI."""
if True is force:
assert 0 <= self._backgroundCount
self._backgroundCount = 0
else:
assert 0 < self._backgroundCount
self._backgroundCount -= 1
if True is force or 0 == self._backgroundCount:
self.cancelButton.setVisible(False)
self.progressBar.setVisible(False)
self._backgroundTasks = None
if 0 < self.photosList.count():
self.generateAlbumButton.setVisible(True)
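    # The three methods above implement the background-work protocol used
    # throughout this class: call _backgroundInit(steps) before submitting
    # tasks, _backgroundStart(tasks) once the futures exist (enabling the
    # cancel UI), and arrange for _backgroundCompleteSignal to fire when the
    # work finishes.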
def _bgAddPhoto(self, path, name, prev_task, dirtying):
"""Background task to load a photo and signal the UI to add it
to the album when done."""
photo = PhotoFile(path, name, self._config)
photo.addRef()
# Wait for the previous photo to be loaded so that photos are added to the list in the
# correct order.
if None is not prev_task:
concurrent.futures.wait([prev_task])
self._addPhotoSignal.emit(photo, dirtying)
self._incProgressSignal.emit()
def _bgAddPhotoComplete(self, tasks):
"""Background task to display any errors encountered while
loading photos, prompt the user to rename any photos with non-
unique names, and update the lists of available properties and
captions."""
# Wait for the addPhoto tasks to complete.
(done, not_done) = concurrent.futures.wait(tasks)
assert 0 == len(not_done)
# Display any error messages and find any files that need to be renamed
errors = []
rename_photos = []
for task in done:
try:
task.result()
except FileNotFoundError as exc:
# Either exiftool or the photo was missing.
if "exiftool" == exc.filename:
errors.append("Error executing 'exiftool'. Is it installed?")
else:
errors.append("Error opening photo " + exc.filename)
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
# Exiftool failed or timed out.
errors.append("Error reading metadata from photo " + task.photoName)
except FileExistsError:
# The symlink target already exists, implying a duplicate file name.
rename_photos.append(task.photoName)
except concurrent.futures.CancelledError:
# The task was cancelled.
pass
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
errors.append(str(exc_type) + ": " + str(exc_value))
if 0 != len(errors):
self._showErrorSignal.emit(str(len(errors)) +
" errors were encountered loading files:\n" +
"\n".join(errors))
# Update the available properties list
self.showAllPropertiesFlag.stateChanged.emit(0)
self.showAllCaptionsFlag.stateChanged.emit(0)
# Get the user to handle any photos that need renaming
if 0 != len(rename_photos):
self._renamePhotosSignal.emit(rename_photos)
# Re-enable any disabled buttons
self._backgroundCompleteSignal.emit(False)
def _bgRemovePhoto(self, photo):
"""Background task to clean up after removing a photo."""
photo.release()
self._incProgressSignal.emit()
def _bgRemovePhotosComplete(self, tasks):
"""Background task to perform clean-up after removing photos."""
# Wait for the removePhoto tasks to complete.
not_done = concurrent.futures.wait(tasks)[1]
assert 0 == len(not_done)
# Update the available properties and captions
self.showAllPropertiesFlag.stateChanged.emit(0)
self.showAllCaptionsFlag.stateChanged.emit(0)
# Re-enable any disabled buttons
self._backgroundCompleteSignal.emit(False)
def _updatePhotoProperties(self):
"""Rebuild the list of properties available in the currently
loaded photos."""
properties = {}
count = self.photosList.count()
for i in range(count):
photo = self.photosList.item(i)
for prop in photo.properties.keys():
if prop in properties:
properties[prop] += 1
else:
properties[prop] = 1
# Rebuild the list
self._addPropertyButtonMenu.clear()
show_all = self.showAllPropertiesFlag.isChecked()
for prop in sorted(properties.keys()):
if show_all or count == properties[prop]:
self._addPropertyButtonMenu.addAction(prop, self._addPropertyHandler)
def _updatePhotoCaptions(self):
"""Rebuild the list of captions available in the currently
loaded photos."""
# Figure out what caption fields we have.
captions = {}
count = self.photosList.count()
for i in range(count):
photo = self.photosList.item(i)
for prop in photo.captions.keys():
if prop in captions:
captions[prop] += 1
else:
captions[prop] = 1
# Rebuild the list
self._addCaptionButtonMenu.clear()
show_all = self.showAllCaptionsFlag.isChecked()
for prop in sorted(captions.keys()):
if show_all or count == captions[prop]:
self._addCaptionButtonMenu.addAction(prop, self._addCaptionHandler)
def _addCaptionHandler(self):
"""Add the selected caption field to the album captions."""
if 0 == len(self.captionsList.findItems(self.sender().text(),
QtCore.Qt.MatchFixedString)):
self.captionsList.addItem(self.sender().text())
self._dirty = True
def _removeCaptionsHandler(self):
"""Remove the selected caption fields from the album captions."""
for item in self.captionsList.selectedItems():
# removeItemWidget() doesn't seem to work
self.captionsList.takeItem(self.captionsList.indexFromItem(item).row())
self._dirty = True
def _addPropertyHandler(self):
"""Add the selected property field to the album properties."""
if 0 == len(self.propertiesList.findItems(self.sender().text(),
QtCore.Qt.MatchFixedString)):
self.propertiesList.addItem(self.sender().text())
self._dirty = True
def _removePropertiesHandler(self):
"""Remove the selected properties fields from the album
properties."""
for item in self.propertiesList.selectedItems():
# removeItemWidget() doesn't seem to work
self.propertiesList.takeItem(self.propertiesList.indexFromItem(item).row())
self._dirty = True
def _generateAlbum(self):
"""Save an album. Prompt the user for a file name, then spawn
background tasks to generate album and photo JSON, thumbnails,
and down-scaled photos. """
# Get the output file name
# Default to the file name of the current album (if it exists). Do not prompt for
# overwrite when re-saving. QFileDialog can't do that natively, so we implement that logic
        # here. Note that it's still vulnerable to races: a file that doesn't exist now might exist
# when we try to write to it.
selected = self._config.outputDir
        if None is not self._currentAlbumFileName:
selected = self._currentAlbumFileName
album_file_name = None
while True:
album_file_name = QtGui.QFileDialog.getSaveFileName(self, "Album File",
selected, self.FILTER_ALBUMS,
QtGui.QFileDialog.DontConfirmOverwrite)
if self._currentAlbumFileName == album_file_name \
or not os.path.isfile(album_file_name) \
or QtGui.QMessageBox.Yes == QtGui.QMessageBox.warning(self, "Album File",
self.tr("%s already exists.\nDo you want to replace it?") % (album_file_name),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No):
break
selected = album_file_name
if "" != album_file_name:
album_dir_name = os.path.dirname(album_file_name)
album = self._saveUIData()
album["metadataDir"] = urllib.parse.quote(Config.METADATA_DIR + "/")
album["title"] = self.titleText.toPlainText()
album["description"] = self.descriptionText.toPlainText()
album["photos"] = \
[self.photosList.item(i).getAlbumJSON() for i in range(0, self.photosList.count())]
# To prevent the output directory from being changed while generating files, we do the
# following:
# 1. Create a secure temporary directory.
# 2. Open the output directory. Get its file descriptor.
# 3. Construct the /proc/<pid>/fd/<fd> path to the directory using the file
# descriptor.
# 4. Create a symlink from the temporary directory to the /proc path. The link's name
# is unique but predictable; that's ok because the directory is secure.
# 5. Use the symlink as the path when creating files.
self._backgroundInit(3 * self.photosList.count() + 5)
tasks = []
directories = DirectoryHandleList()
# Create the output directories.
# We read and write directories from different threads, but there's no race
# because the read tasks are blocked until after the write task completes.
album_dir_task = self._threads.submit(self._bgCreateOutputDirectory, album_dir_name,
directories, "album")
tasks.append(album_dir_task)
metadata_dir_task = None
if 0 != len(Config.METADATA_DIR):
metadata_dir_task = self._threads.submit(self._bgCreateOutputDirectory,
os.path.join(album_dir_name,
Config.METADATA_DIR),
directories, "metadata")
tasks.append(metadata_dir_task)
photo_dir_task = None
if 0 != len(Config.PHOTO_DIR):
photo_dir_task = self._threads.submit(self._bgCreateOutputDirectory,
os.path.join(album_dir_name,
Config.PHOTO_DIR),
directories, "photos")
tasks.append(photo_dir_task)
thumbnail_dir_task = None
if 0 != len(Config.THUMBNAIL_DIR):
thumbnail_dir_task = self._threads.submit(self._bgCreateOutputDirectory,
os.path.join(album_dir_name,
Config.THUMBNAIL_DIR),
directories, "thumbnails")
tasks.append(thumbnail_dir_task)
# Create the album JSON file
tasks.append(self._threads.submit(self._bgGenerateAlbum, album,
lambda: os.path.join(
directories.getPath("album"),
os.path.basename(album_file_name)),
album_dir_task))
# Create the metadata, thumbnail, and image for each photo.
count = self.photosList.count()
if 0 < count:
captions = album["captionFields"]
properties = album["propertyFields"]
for i in range(0, count):
photo = self.photosList.item(i)
# In Python 3.4, I might be able to use functools.partialmethod to create a
# generic wrapper that calls self._incProgressSignal.emit() after an arbitrary
# method call, rather than needing to write wrappers for every method call.
task = self._threads.submit(self._bgGeneratePhotoJSON, photo,
lambda: directories.getPath("metadata"),
album["photoResolution"][0],
album["photoResolution"][1], captions, properties,
metadata_dir_task)
photo.addRef()
task.photoName = photo.getPath()
tasks.append(task)
task = self._threads.submit(self._bgGeneratePhoto, photo,
lambda: directories.getPath("photos"),
album["photoResolution"][0],
album["photoResolution"][1],
self._config.photoQuality, photo_dir_task)
photo.addRef()
task.photoName = photo.getPath()
tasks.append(task)
task = self._threads.submit(self._bgGenerateThumbnail, photo,
lambda: directories.getPath("thumbnails"),
Config.THUMB_WIDTH, Config.THUMB_HEIGHT,
Config.THUMB_QUALITY, thumbnail_dir_task)
photo.addRef()
task.photoName = photo.getPath()
tasks.append(task)
task = self._threads.submit(functools.partial(handle_exceptions,
self._bgTasksComplete),
tasks, directories, "generating the album", cleansing=True)
self._backgroundStart(tasks+[task])
self._config.outputDir = album_dir_name
self._currentAlbumFileName = album_file_name
self.setWindowTitle(Config.PROGRAM_NAME + ": " + os.path.basename(album_file_name))
def _bgCreateOutputDirectory(self, dir_path, directories, name):
"""Background task to create a directory and link to it from
the temporary directory."""
ensure_directory(dir_path)
dir_fd = os.open(dir_path, os.O_RDONLY)
directories.add(name, dir_fd)
self._incProgressSignal.emit()
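    # Illustrative sketch (assumptions about the Linux /proc layout noted in
    # _generateAlbum): holding an open descriptor pins the directory, and the
    # /proc/<pid>/fd/<fd> path keeps resolving to it even if it is renamed:
    #
    #     fd = os.open("/some/dir", os.O_RDONLY)
    #     pinned = "/proc/%d/fd/%d" % (os.getpid(), fd)
    #     with open(os.path.join(pinned, "out.txt"), "w") as f:
    #         f.write("lands in the original directory")
    #     os.close(fd)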
def _bgGenerateAlbum(self, album_data, get_album_file_name, dir_creation_task):
"""Background task to generate an album JSON file."""
if None is not dir_creation_task:
concurrent.futures.wait([dir_creation_task])
Album.save(get_album_file_name(), album_data)
self._incProgressSignal.emit()
def _bgTasksComplete(self, tasks, directories, message, cleansing=False):
"""Background task to display any errors encountered while
executing background tasks and clean up any file descriptors
and links that were needed by the background tasks."""
# Wait for the tasks to complete.
(done, not_done) = concurrent.futures.wait(tasks)
assert 0 == len(not_done)
# Close any file descriptors. Ignore errors.
directories.closeAll()
# Display any error messages
errors = []
for task in done:
try:
task.result()
except concurrent.futures.CancelledError:
pass
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
# convert failed or timed out.
errors.append("Error resizing " + task.photoName)
except (SaveError) as exc:
errors.append(str(exc))
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
errors.append(str(exc_type) + ": " + str(exc_value))
if 0 != len(errors):
self._showErrorSignal.emit("%d errors were encountered while %s:\n" %
(len(errors), message) + "\n".join(errors))
# Dismiss the cancellation UI
self._backgroundCompleteSignal.emit(False)
# Mark the document as no longer dirty.
if cleansing:
self._dirtySignal.emit(False)
def _bgGeneratePhotoJSON(self, photo, get_out_dir_name, width, height, captions, properties,
dir_creation_task):
"""Background task to generate a photo JSON file."""
# Wait for the directory to be created, then generate the photo JSON
if None is not dir_creation_task:
concurrent.futures.wait([dir_creation_task])
photo.generateJSON(get_out_dir_name(), width, height, captions, properties)
photo.release()
self._incProgressSignal.emit()
def _bgGeneratePhoto(self, photo, get_out_dir_name, width, height, quality, dir_creation_task):
"""Background task to generate a down-scaled photo."""
# Wait for the directory to be created, then generate the photo
if None is not dir_creation_task:
concurrent.futures.wait([dir_creation_task])
photo.generatePhoto(get_out_dir_name(), width, height, quality)
photo.release()
self._incProgressSignal.emit()
def _bgGenerateThumbnail(self, photo, get_out_dir_name, width, height, quality,
dir_creation_task):
"""Background task to generate a photo thumbnail."""
# Wait for the directory to be created, then generate the thumbnail
if None is not dir_creation_task:
concurrent.futures.wait([dir_creation_task])
photo.generateThumbnail(get_out_dir_name(), width, height, quality)
photo.release()
self._incProgressSignal.emit()
def _closeAlbum(self, use_defaults):
"""Clear the current album data."""
# Clear the selected photos. I can't just call clear() because there's cleanup to do.
self.photosList.selectAll()
self._removePhotosHandler()
# Clear selections and text fields. Restore defaults if available.
if use_defaults and None is not self._config.uiData:
self._restoreUIData(self._config.uiData)
else:
self.captionsList.clear()
self.propertiesList.clear()
self.footerText.setPlainText(None)
self.photoSizeButton.setCurrentIndex(0)
self.titleText.setPlainText(None)
self.descriptionText.setPlainText(None)
self._currentAlbumFileName = None
self.setWindowTitle(Config.PROGRAM_NAME)
self.generateAlbumButton.setVisible(False)
self._dirty = False
def _newAlbum(self):
"""Create a new album."""
# Prompt if the album is dirty.
if not self._dirty or 0 == self.photosList.count() \
or QtGui.QMessageBox.Yes == QtGui.QMessageBox.warning(self, "New Album",
"The current album has not been saved. Realy discard it?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No):
self._closeAlbum(use_defaults=True)
def _openAlbum(self):
"""Prompt the user for an album JSON file to load then spawn a
background task to load it."""
# Prompt if the album is dirty.
if not self._dirty or 0 == self.photosList.count() \
or QtGui.QMessageBox.Yes == QtGui.QMessageBox.warning(self, "Open Album",
"The current album has not been saved. Realy discard it?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No):
album_file_name = QtGui.QFileDialog.getOpenFileName(self, "Select album",
self._config.outputDir,
self.tr(self.FILTER_ALBUMS))
            # The Qt documentation says that getOpenFileName returns a null string on cancel. But
# it returns an empty string here. Maybe that's a PyQt bug?
if "" != album_file_name:
self._closeAlbum(use_defaults=False)
# Load the file in a background thread.
self._backgroundInit(1)
task = self._threads.submit(functools.partial(handle_exceptions,
self._bgLoadAlbum), album_file_name)
self._backgroundStart([task])
def _bgLoadAlbum(self, album_file_name):
"""Background task to open and parse an album JSON file."""
try:
data = Album.load(album_file_name)
# Call back to the foreground to populate the UI.
self._setAlbumDataSignal.emit(album_file_name, data)
except (OSError) as exc:
self._showErrorSignal.emit("Error reading '%s': %s." %
(os.path.basename(album_file_name)), str(exc))
except (ParseError) as exc:
self._showErrorSignal.emit("Error loading an album from '%s': %s" %
(os.path.basename(album_file_name)), str(exc))
self._backgroundCompleteSignal.emit(False)
def _setAlbumData(self, album_file_name, data):
"""Pushes data from a loaded album JSON file into the UI."""
try:
self._restoreUIData(data, require_fields=True)
self.titleText.setPlainText(data["title"])
self.descriptionText.setPlainText(data["description"])
photos = []
for photo in data["photos"]:
path = urllib.parse.unquote(photo["path"])
photos.append((os.path.expanduser(path), os.path.basename(path)))
self._addPhotoFiles(photos, dirtying=False)
self._currentAlbumFileName = album_file_name
self.setWindowTitle(Config.PROGRAM_NAME + ": " + os.path.basename(album_file_name))
self._dirty = False
except KeyError:
QtGui.QMessageBox.warning(None, Config.PROGRAM_NAME,
"Unable to load an album from '%s'." %
(os.path.basename(album_file_name)),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
def _installTemplate(self):
"""Install the photo album template files. Prompt the user for
a directory, then copy the files over on background threads."""
# Get the destination directory
# Using the Qt directory chooser to work around bug 2014-06-06_001.
out_dir = QtGui.QFileDialog.getExistingDirectory(self, "Album directory",
self._config.outputDir,
QtGui.QFileDialog.ShowDirsOnly|
QtGui.QFileDialog.DontUseNativeDialog)
if "" != out_dir:
self._backgroundInit(len(Config.TEMPLATE_FILE_NAMES) + 1)
tasks = []
directories = DirectoryHandleList()
# Create the directory.
album_dir_task = self._threads.submit(self._bgCreateOutputDirectory, out_dir,
directories, "album")
tasks.append(album_dir_task)
# Spawn background tasks to do the copying.
for name in Config.TEMPLATE_FILE_NAMES:
tasks.append(self._threads.submit(self._bgCopyFile, os.path.join(DATA_PATH, name),
lambda filename=name: os.path.join(
directories.getPath("album"), filename),
album_dir_task))
task = self._threads.submit(functools.partial(handle_exceptions,
self._bgTasksComplete),
tasks, directories, "installing the template")
self._backgroundStart(tasks+[task])
def _bgCopyFile(self, source, get_destination, dir_creation_task):
"""Background task to copy a file."""
# Wait for the directory to be created, then copy the file
if None is not dir_creation_task:
concurrent.futures.wait([dir_creation_task])
shutil.copyfile(source, get_destination())
self._incProgressSignal.emit()
def _cancelBackgroundTasks(self):
"""Attempt to cancel any pending background tasks."""
if None is not self._backgroundTasks:
for task in reversed(self._backgroundTasks):
task.cancel()
concurrent.futures.wait(self._backgroundTasks)
self._backgroundComplete(True)
def _renamePhotos(self, photo_names):
"""Prompt the user to rename photos that share names with other
photos that have already been loaded, then attempt to load them
again using the new names."""
prompt_dialog = QtGui.QMessageBox(self)
prompt_dialog.setIcon(QtGui.QMessageBox.Question)
rename_button = prompt_dialog.addButton("Rename...", QtGui.QMessageBox.YesRole)
prompt_dialog.addButton("Remove", QtGui.QMessageBox.NoRole)
# Get new names for the files
new_names = []
for photo_name in photo_names:
prompt_dialog.setText("There is already a photo with the name %s in the album. " \
"Would you like to rename or remove the new one?" % (photo_name))
prompt_dialog.exec_()
if rename_button is prompt_dialog.clickedButton():
# It seems that if I try to re-use the QFileDialog, changing the selected file has
# no effect.
file_dialog = QtGui.QFileDialog(self, "New photo name", self._config.tempDir.name,
self.FILTER_IMAGES)
file_dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
file_dialog.setFileMode(QtGui.QFileDialog.AnyFile)
# The PhotoFile class won't let the user overwrite anything, but with overwrite
# confirmations on, QFileDialog prompts to overwrite the directory if a user hits
# "Save" with nothing selected. Disabling confirmation avoids this.
file_dialog.setOption(QtGui.QFileDialog.DontConfirmOverwrite)
file_dialog.selectFile(os.path.basename(photo_name))
file_dialog.exec_()
if 0 < len(file_dialog.selectedFiles()):
assert 1 == len(file_dialog.selectedFiles())
new_file_name = file_dialog.selectedFiles()[0]
new_names.append((photo_name, os.path.basename(new_file_name)))
# Spawn background tasks to load the files using the new names.
self._addPhotoFiles(new_names)
def _about(self):
"""Show the help dialog."""
dialog = QtGui.QDialog(self)
dialog.ui = Ui_AboutDialog()
dialog.ui.setupUi(dialog)
dialog.ui.closeButton.clicked.connect(dialog.close)
dialog.show()
def main():
"""Main."""
app = QtGui.QApplication(sys.argv)
# Check that the Python version is at least 3.3, that we're on an OS with /proc/<pid>/fd/<fd>,
# and that exiftool and convert are available. Error out if not.
if sys.version_info.major < 3 or (sys.version_info.major == 3 and sys.version_info.minor < 3):
QtGui.QMessageBox.critical(None, Config.PROGRAM_NAME,
"This program requires Python 3.3 or newer.",
QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
sys.exit(1)
try:
with open("/proc/%d/fd/0" % (os.getpid())) as fd:
pass
except IOError:
QtGui.QMessageBox.critical(None, Config.PROGRAM_NAME,
"This program currently only runs on Linux.",
QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
sys.exit(1)
try:
subprocess.check_call(["exiftool", "-ver"], stdout=subprocess.DEVNULL, timeout=1)
except (IOError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
QtGui.QMessageBox.critical(None, Config.PROGRAM_NAME,
"This program requires that 'exiftool' be available in your " \
"PATH.", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
sys.exit(1)
try:
subprocess.check_call(["convert", "--version"], stdout=subprocess.DEVNULL, timeout=1)
except (IOError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
QtGui.QMessageBox.critical(None, Config.PROGRAM_NAME, "This program requires that " \
"'convert' from the 'ImageMagick' package be available in " \
"your PATH.", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
sys.exit(1)
with Config() as config:
wnd = DyphalUI(config)
wnd.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
JulienMcJay/eclock | refs/heads/master | windows/Python27/Lib/site-packages/Cython/Plex/__init__.py | 135 | #=======================================================================
#
# Python Lexical Analyser
#
#=======================================================================
"""
The Plex module provides lexical analysers with similar capabilities
to GNU Flex. The following classes and functions are exported;
see the attached docstrings for more information.
Scanner For scanning a character stream under the
direction of a Lexicon.
Lexicon For constructing a lexical definition
to be used by a Scanner.
Str, Any, AnyBut, AnyChar, Seq, Alt, Opt, Rep, Rep1,
Bol, Eol, Eof, Empty
Regular expression constructors, for building pattern
definitions for a Lexicon.
State For defining scanner states when creating a
Lexicon.
TEXT, IGNORE, Begin
Actions for associating with patterns when
creating a Lexicon.
"""
from Actions import TEXT, IGNORE, Begin
from Lexicons import Lexicon, State
from Regexps import RE, Seq, Alt, Rep1, Empty, Str, Any, AnyBut, AnyChar, Range
from Regexps import Opt, Rep, Bol, Eol, Eof, Case, NoCase
from Scanners import Scanner
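# A minimal usage sketch (an assumption, not part of this module): build a
# Lexicon, then pull tokens from a stream one at a time:
#
#     from Cython.Plex import Lexicon, Str, Any, Rep1, TEXT, IGNORE, Scanner
#
#     lexicon = Lexicon([
#         (Rep1(Any("0123456789")), TEXT),    # integer literals
#         (Any(" \t\n"),            IGNORE),  # skip whitespace
#     ])
#     scanner = Scanner(lexicon, open("data.txt"), "data.txt")
#     while 1:
#         value, text = scanner.read()        # value is None at end of file
#         if value is None:
#             break
#         print value, text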
|
ktan2020/legacy-automation | refs/heads/master | win/Lib/test/test_popen.py | 10 | #! /usr/bin/env python
"""Basic tests for os.popen()
Particularly useful for platforms that fake popen.
"""
import unittest
from test import test_support
import os, sys
# Test that command-lines get down as we expect.
# To do this we execute:
# python -c "import sys;print sys.argv" {rest_of_commandline}
# This results in Python being spawned and printing the sys.argv list.
# We can then eval() the result of this, and see what each argv was.
python = sys.executable
class PopenTest(unittest.TestCase):
def _do_test_commandline(self, cmdline, expected):
cmd = '%s -c "import sys;print sys.argv" %s' % (python, cmdline)
data = os.popen(cmd).read() + '\n'
got = eval(data)[1:] # strip off argv[0]
self.assertEqual(got, expected)
def test_popen(self):
self.assertRaises(TypeError, os.popen)
self._do_test_commandline(
"foo bar",
["foo", "bar"]
)
self._do_test_commandline(
'foo "spam and eggs" "silly walk"',
["foo", "spam and eggs", "silly walk"]
)
self._do_test_commandline(
'foo "a \\"quoted\\" arg" bar',
["foo", 'a "quoted" arg', "bar"]
)
test_support.reap_children()
def test_return_code(self):
self.assertEqual(os.popen("exit 0").close(), None)
if os.name == 'nt':
self.assertEqual(os.popen("exit 42").close(), 42)
else:
self.assertEqual(os.popen("exit 42").close(), 42 << 8)
def test_main():
test_support.run_unittest(PopenTest)
if __name__ == "__main__":
test_main()
|
jaywink/diaspora-hub | refs/heads/master | config/production.py | 2 | '''
Production Configurations
'''
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Production middleware
# ------------------------------------------------------------------------------
PRODUCTION_MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
)
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE = PRODUCTION_MIDDLEWARE + MIDDLEWARE
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='The Federation <noreply@thefederation.local>')
# Set this to "django.core.mail.backends.smtp.EmailBackend" for SMTP email sending
EMAIL_BACKEND = env("DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend")
# Define these as necessary if using the SMTP backend
# See https://docs.djangoproject.com/en/1.10/topics/email/#smtp-backend
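# For example (a sketch), the overrides could live in the environment or in an
# `.env` file read by django-environ:
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend
#   DJANGO_EMAIL_HOST=smtp.example.com
#   DJANGO_EMAIL_HOST_USER=mailer
#   DJANGO_EMAIL_HOST_PASSWORD=changeme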
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default="localhost")
EMAIL_PORT = env("DJANGO_EMAIL_PORT", default=587)
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default="")
EMAIL_USE_TLS = env.bool("DJANGO_EMAIL_USE_TLS", default=True)
EMAIL_USE_SSL = env.bool("DJANGO_EMAIL_USE_SSL", default=False)
EMAIL_TIMEOUT = env("DJANGO_EMAIL_TIMEOUT", default=None)
EMAIL_SSL_KEYFILE = env("DJANGO_EMAIL_SSL_KEYFILE", default=None)
EMAIL_SSL_CERTFILE = env("DJANGO_EMAIL_SSL_CERTFILE", default=None)
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# CACHING
# ------------------------------------------------------------------------------
# Guard against old setting
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://%s:%s/%s" % (REDIS_HOST, REDIS_PORT, REDIS_DB),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"PASSWORD": REDIS_PASSWORD,
}
}
}
# RQ
# --
RQ_QUEUES["default"]["USE_REDIS_CACHE"] = "default"
# SENTRY
# ------
# If you wish to configure Sentry for error reporting, first create your
# Sentry account and then place the DSN in `.env` as `SENTRY_DSN=dsnhere`.
if env('SENTRY_DSN', default=None):
INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
SENTRY_DSN = env('SENTRY_DSN')
RAVEN_CONFIG = {
'dsn': SENTRY_DSN,
'site': THEFEDERATION_DOMAIN,
}
LOGGING['handlers']['sentry'] = {
'level': env('SENTRY_LEVEL', default='ERROR'),
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
}
LOGGING['loggers']['thefederation']['handlers'].append('sentry')
LOGGING['loggers']['federation']['handlers'].append('sentry')
LOGGING['loggers']['rq_scheduler.scheduler']['handlers'].append('sentry')
LOGGING['root'] = {
'level': 'WARNING',
'handlers': ['sentry', 'file'],
}
LOGGING['loggers']['raven'] = {
'level': 'DEBUG',
'handlers': ['file'],
'propagate': False,
}
LOGGING['loggers']['sentry.errors'] = {
'level': 'DEBUG',
'handlers': ['file'],
'propagate': False,
}
|
kingland/go-v8 | refs/heads/master | v8-3.28/build/gyp/test/win/gyptest-link-debug-info.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure debug info setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('debug-info.gyp', chdir=CHDIR)
test.build('debug-info.gyp', test.ALL, chdir=CHDIR)
suffix = '.exe.pdb' if test.format == 'ninja' else '.pdb'
test.built_file_must_not_exist('test_debug_off%s' % suffix, chdir=CHDIR)
test.built_file_must_exist('test_debug_on%s' % suffix, chdir=CHDIR)
test.pass_test()
|
be-cloud-be/horizon-addons | refs/heads/9.0 | server/addons/website_partner/__init__.py | 616 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
import controllers
|
mt2d2/servo | refs/heads/master | python/mach/mach/test/test_logger.py | 128 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
import logging
import time
import unittest
from mach.logging import StructuredHumanFormatter
from mozunit import main
class DummyLogger(logging.Logger):
def __init__(self, cb):
logging.Logger.__init__(self, 'test')
self._cb = cb
def handle(self, record):
self._cb(record)
class TestStructuredHumanFormatter(unittest.TestCase):
def test_non_ascii_logging(self):
# Ensures the formatter doesn't choke when non-ASCII characters are
# present in printed parameters.
formatter = StructuredHumanFormatter(time.time())
def on_record(record):
result = formatter.format(record)
relevant = result[9:]
self.assertEqual(relevant, 'Test: s\xe9curit\xe9')
logger = DummyLogger(on_record)
value = 's\xe9curit\xe9'
logger.log(logging.INFO, 'Test: {utf}',
extra={'action': 'action', 'params': {'utf': value}})
if __name__ == '__main__':
main()
|
byt3bl33d3r/Empire | refs/heads/master | lib/modules/powershell/situational_awareness/network/powerview/get_domain_policy.py | 4 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-DomainPolicyData',
'Author': ['@harmj0y','@DisK0nn3cT','@OrOneEqualsOne'],
'Description': ('Returns the default domain or DC policy for a given domain or domain controller. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Policy' : {
'Description' : 'Extract Domain or DC (domain controller) policies, or All',
'Required' : True,
'Value' : 'Domain'
},
'Domain' : {
'Description' : 'The domain to query for default policies, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an Active Directory server (domain controller) to bind to.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds',
'Required' : False,
'Value' : ''
            },
            'ExpandObject' : {
                'Description' : "Expand one of the returned objects, e.g. 'System Access'.",
                'Required' : False,
                'Value' : ''
            }
        }
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(moduleCode)
pscript = ""
expand = False
value_to_expand = ""
for option,values in self.options.iteritems():
if option.lower() != "agent" and option.lower() != "expandobject":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
pscript += " -" + str(option)
else:
pscript += " -" + str(option) + " " + str(values['Value'])
if option.lower() == "expandobject" and values['Value']:
expand = True
value_to_expand += values['Value']
if expand:
script += "(" + moduleName + " " + pscript + ")." + "'" + value_to_expand + "'" + ' | fl | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
else:
script += "\n" + moduleName + " " + pscript + ' | fl | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed! Use ExpandObject option to expand one of the objects above such as \'System Access\'"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
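    # Example (a sketch) of what generate() appends after the stripped
    # powerview.ps1 source for the default options above (Policy=Domain,
    # no ExpandObject):
    #
    #     Get-DomainPolicyData  -Policy Domain | fl | Out-String | %{$_ + "`n"};
    #     "`nGet-DomainPolicyData completed! ..."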
|
benschmaus/catapult | refs/heads/master | third_party/gsutil/gslib/tests/__init__.py | 38 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains test cases for gsutil."""
from __future__ import absolute_import
|
denisenkom/django | refs/heads/master | django/contrib/staticfiles/handlers.py | 106 | from django.conf import settings
from django.core.handlers.base import get_path_info
from django.core.handlers.wsgi import WSGIHandler
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
def __init__(self, application, base_dir=None):
self.application = application
if base_dir:
self.base_dir = base_dir
else:
self.base_dir = self.get_base_dir()
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
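# A minimal usage sketch (an assumption, not part of this module): wrap a WSGI
# application so requests under STATIC_URL are served from STATIC_ROOT:
#
#     from django.core.wsgi import get_wsgi_application
#     from django.contrib.staticfiles.handlers import StaticFilesHandler
#
#     application = StaticFilesHandler(get_wsgi_application())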
|
bOOm-X/spark | refs/heads/master | python/docs/epytext.py | 85 | import re
RULES = (
(r"<(!BLANKLINE)[\w.]+>", r""),
(r"L{([\w.()]+)}", r":class:`\1`"),
(r"[LC]{(\w+\.\w+)\(\)}", r":func:`\1`"),
(r"C{([\w.()]+)}", r":class:`\1`"),
(r"[IBCM]{([^}]+)}", r"`\1`"),
('pyspark.rdd.RDD', 'RDD'),
)
def _convert_epytext(line):
"""
>>> _convert_epytext("L{A}")
:class:`A`
"""
line = line.replace('@', ':')
for p, sub in RULES:
line = re.sub(p, sub, line)
return line
def _process_docstring(app, what, name, obj, options, lines):
for i in range(len(lines)):
lines[i] = _convert_epytext(lines[i])
def setup(app):
app.connect("autodoc-process-docstring", _process_docstring)
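# Example of the rewriting above (a sketch; '@' becomes ':' first, then the
# RULES run in order):
#
#     _convert_epytext("@param path: L{RDD} of strings")
#     # -> ':param path: :class:`RDD` of strings'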
|
zerc/django | refs/heads/master | tests/template_tests/filter_tests/test_striptags.py | 324 | from django.template.defaultfilters import striptags
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StriptagsTests(SimpleTestCase):
@setup({'striptags01': '{{ a|striptags }} {{ b|striptags }}'})
def test_striptags01(self):
output = self.engine.render_to_string(
'striptags01',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
@setup({'striptags02': '{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}'})
def test_striptags02(self):
output = self.engine.render_to_string(
'striptags02',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
class FunctionTests(SimpleTestCase):
def test_strip(self):
self.assertEqual(
striptags('some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags'),
'some html with alert("You smell") disallowed tags',
)
def test_non_string_input(self):
self.assertEqual(striptags(123), '123')
|
evanccnyc/ansible | refs/heads/devel | test/units/parsing/__init__.py | 7690 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
lwerdna/chess | refs/heads/master | BpgnParser.py | 1 | #!/usr/bin/python
# Copyright 2012, 2013 Andrew Lamoureux
#
# This file is a part of FunChess
#
# FunChess is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
# parse BPGN (game metadata + moves) into position strings
import os
import re
import sys
import Common
import ChessMove
import PgnTokenizer
###############################################################################
# Match
# - contains tags, comments, moves, and states of a bughouse chess match
# - is able to load itself from bpgn match text
###############################################################################
class PgnChessMatch:
def __init__(self):
self.initState = Common.initChessFEN
self.moves = []
self.tags = {}
self.comments = []
self.states = [self.initState]
def incrMoveNum(self, fullMove):
old = fullMove
m = re.match(r'^(\d+)([AaBb])$', fullMove)
[num, letter] = [int(m.group(1)), m.group(2)]
if letter in 'ab':
num += 1
letter = {'A':'a', 'a':'A', 'B':'b', 'b':'B'}[letter]
new = str(num) + letter
#print "incremented from %s to %s" % (old, new)
return new
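    # Example of the bookkeeping above: each board's move counter advances
    # independently, with the case flip tracking whose turn it is, e.g.
    # 1A -> 1a -> 2A -> 2a and 1B -> 1b -> 2B -> 2b.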
def sanityCheck(self):
# does the game have ANY moves in it?
if len(self.moves) == 0:
raise MatchZeroMovesException("no moves recorded")
# does the game have missing/out-of-order moves in it?
expectA = '1A'
expectB = '1B'
for m in self.moves:
            # bughouse-db games store a repeated move at the end when
            # that player forfeits on time
            if 'TIME_FORFEIT' in m.flags:
                continue
fullMove = m.moveNum + m.player
if fullMove == expectA:
expectA = self.incrMoveNum(expectA)
elif fullMove == expectB:
expectB = self.incrMoveNum(expectB)
else:
raise MatchMovesOOOException("expected move %s or %s (got instead:\n %s)" % \
(expectA, expectB, str(m)))
# - parses, populates the tags member
# - parses, populates the moves member
# - parses, populates the comments member
# - calculates the states member
#
def parsePgn(self, text):
tokens = PgnTokenizer.tokenize(text)
moveNum = 1
player = 'W'
while tokens:
token = tokens.pop(0)
print "on token: -%s-" % token
# tag tokens eg: [Event "May 2013 Tourney"]
m = re.match(r'\[(.*?) "(.*?)"\]', token)
if m:
self.tags[m.group(1)] = m.group(2)
continue
# comment tokens eg: { good move! also consider Rxe8 }
m = re.match('^{(.*)}$', token)
if m:
# if we're in the moves section, comment applies to a move
if self.moves:
self.moves[-1].addComment(m.group(1))
# else it applies to the match comments
else:
self.comments.append(m.group(1))
continue
# result tokens eg: 0-1
m = re.match(Common.regexResults, token)
if m:
self.result = token
if tokens:
raise Exception("result token was not the final token! next is: " + tokens[0])
continue
# move number token eg: 34.
m = re.match(r'(\d+)\.', token)
if m:
                if int(m.group(1)) != moveNum:
raise Exception("out of order move number: " + token)
moveNum += 1;
player = 'W'
# normal move (SAN)
m = re.match(Common.regexSanChess, token)
if m:
move = ChessMove.ChessMove()
move.moveNum = moveNum
move.player = player
move.san = token
self.moves.append(move)
player = {'W':'B', 'B':'W'}[player]
# calculate all board states
self.states = [self.initState]
for move in self.moves:
# exceptions (repeated moves due to time forfeiture, etc.) just carry state along...
if 'TIME_FORFEIT' in move.flags:
self.states.append(self.states[-1])
continue
currState = self.states[-1]
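            # NOTE: `x` is assumed to be a chess-rules helper providing
            # nextState(fen, san); it is not imported above (neither Common
            # nor ChessMove defines it here).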
nextState = x.nextState(self.states[-1], move.san)
print "current state: " + currState
print "next state: " + nextState
self.states.append(nextState)
def __str__(self):
answer = ''
#answer = '%s[%s],%s[%s] vs %s[%s],%s[%s]\n' % ( \
# self.tags['WhiteA'], self.tags['WhiteAElo'], self.tags['BlackA'], self.tags['BlackAElo'], \
# self.tags['BlackB'], self.tags['BlackBElo'], self.tags['WhiteA'], self.tags['WhiteAElo'] \
#)
answer += "TAGS:\n"
for tag,value in self.tags.iteritems():
answer += "%s: \"%s\"\n" % (tag, value)
answer += "COMMENTS:\n"
for c in self.comments:
answer += c + "\n"
answer += "MOVES (%d total):\n" % len(self.moves)
for m in self.moves:
answer += str(m) + "\n"
return answer
###############################################################################
# MatchIteratorFile
# - return matches from file containing multiple matches
# - basically, split the text around '[Event "..."]' tags
###############################################################################
class PgnChessMatchIteratorFile:
def __init__(self, path):
self.path = path
self.fp = open(path, 'r')
self.lineNum = -1
def __iter__(self):
self.fp.seek(0, 0)
self.lineNum = -1
return self
def peekLine(self, doStrip=1):
line = self.fp.readline()
self.fp.seek(-1*len(line), 1)
if doStrip:
line = line.rstrip()
return line
def readLine(self):
self.lineNum += 1
temp = self.fp.readline()
#print "read: %s" % temp
return temp
def consumeNewLines(self):
while 1:
line = self.peekLine(False)
if not line:
return False
if not re.match(r'^\s+$', line):
break
self.readLine()
return True
# strategy here is simple: consume lines until an Event tag is found
# in other words, Event tags delimit the matches
def next(self):
if not self.consumeNewLines():
raise StopIteration
matchText = self.readLine()
if not re.match(r'^\[Event', matchText):
raise Exception(("expected Event tag at %s:%d\n" + \
"(instead got: %s)") % (self.path, self.lineNum, matchText))
# so long as the next line is not an Event tag, add to current match
while 1:
line = self.peekLine()
if not re.match(r'^\[Event', line):
matchText += '\n' + line
if not self.readLine():
break
else:
break
# return a match
match = PgnChessMatch()
match.path = self.path
match.parsePgn(matchText)
return match
def __del__(self):
if self.fp:
self.fp.close()
self.fp = None
###############################################################################
# MatchIteratorDir
# - return matches from a directory containing files
# - basically, loop over MatchIteratorFile for every file in a directory
###############################################################################
class PgnChessMatchIteratorDir:
def __init__(self, path):
self.walkObj = os.walk(path)
self.matchIterFileObj = None
self.filesList = []
def __iter__(self):
return self
def next(self):
while 1:
# first level: does the file iterator still have something left?
if self.matchIterFileObj:
try:
return self.matchIterFileObj.next()
except StopIteration:
self.matchIterFileObj = None
# second level, is current list of files exhausted? can we create a new
# file iterator?
if self.filesList:
                self.matchIterFileObj = PgnChessMatchIteratorFile(self.filesList.pop())
continue
# third level: no file iterator, no files list, descend!
# purposely don't trap exception: StopIterations should bubble up and tell
# caller that we're done
(root, subFolder, files) = self.walkObj.next()
for f in files:
(dummy, ext) = os.path.splitext(f)
if ext == '.bpgn':
self.filesList.append(os.path.join(root, f))
###############################################################################
# main()
###############################################################################
if __name__ == '__main__':
gamesCount = 0
goodGamesCount = 0
path = sys.argv[1]
it = None
if os.path.isfile(path):
        it = PgnChessMatchIteratorFile(path)
elif os.path.isdir(path):
        it = PgnChessMatchIteratorDir(path)
else:
raise Exception("WTF?")
for m in it:
gamesCount += 1
try:
m.sanityCheck()
except MatchMovesOOOException as e:
print "%s: skipping match due to out of order (or missing) moves\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e))
continue
except MatchZeroMovesException as e:
print "%s: skipping match due to it being empty (no moves whatsoever)\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e))
continue
for s in m.states:
print s
goodGamesCount += 1
#raw_input("hit enter for next game")
print "%d/%d games are good (%02.2f%%)" % (goodGamesCount, gamesCount, 100.0*goodGamesCount/gamesCount)
|
zlatinski/p-android-omap-3.4-new-ion-topic-sync-dma-buf-fence2 | refs/heads/p-android-omap-3.4-new-ion | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the function names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
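# Example (a sketch) of a function-tracer line and the fields extracted above:
#
#     parseLine("bash-4251  [01] 10152.583854: path_put <-path_walk")
#     # -> ('10152.583854', 'path_put', 'path_walk')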
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
bplower/cssef | refs/heads/refactor | WebInterface/WebInterface/modules/organization/urls.py | 1 | from django.conf.urls import patterns
from django.conf.urls import url
from WebInterface.modules.organization import views
urlpatterns = patterns('',
url(r'^(?P<organizationId>\d+)/$', views.home),
url(r'^(?P<organizationId>\d+)/home/$', views.home),
url(r'^(?P<organizationId>\d+)/members/$', views.members),
url(r'^(?P<organizationId>\d+)/members/edit/(?P<username>[\w\-\_]{0,50})/$', views.members),
url(r'^(?P<organizationId>\d+)/members/delete/(?P<username>[\w\-\_]{0,50})/$', views.members),
url(r'^(?P<organizationId>\d+)/settings/$', views.settings),
url(r'^(?P<organizationId>\d+)/plugin/(?P<plugin_name>[\w\-\_]{0,50})/$', views.compplugin_list),
url(r'^(?P<organizationId>\d+)/plugin/(?P<plugin_name>[\w\-\_]{0,50})/create/$', views.compplugin_create),
#url(r'^(?P<organizationId>\d+)/plugin/(?P<plugin_name>[\w\-\_]{0,50})/delete/$', views.compplugin_delete),
) |
glwu/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/subprocess.py | 43 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
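Note: calling p1.stdout.close() in the parent after starting p2 allows p1
to receive a SIGPIPE if p2 exits before p1 finishes.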
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import traceback
import gc
import signal
import builtins
import warnings
import errno
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import fcntl
import pickle
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
warnings.warn("The _posixsubprocess module is not being used. "
"Child process reliability may suffer if your "
"program uses threads.", RuntimeWarning)
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
_FD_CLOEXEC = getattr(fcntl, 'FD_CLOEXEC', 1)
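    # Set or clear the FD_CLOEXEC flag on fd, which controls whether the
    # descriptor is closed automatically when the child process calls exec().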
def _set_cloexec(fd, cloexec):
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | _FD_CLOEXEC)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~_FD_CLOEXEC)
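    # Prefer the C helper's atomic close-on-exec pipe; the pure Python
    # fallback (pipe() followed by fcntl) is not atomic and can race with a
    # concurrent fork in another thread.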
if _posixsubprocess:
_create_pipe = _posixsubprocess.cloexec_pipe
else:
def _create_pipe():
fds = os.pipe()
_set_cloexec(fds[0], True)
_set_cloexec(fds[1], True)
return fds
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError"]
if mswindows:
from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
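# Popen instances whose __del__ ran before their child exited; _cleanup()
# polls and reaps these on each new Popen to avoid leaving zombie processes.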
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None and res >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
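# Retry helper: before PEP 475 (Python 3.5), system calls interrupted by a
# signal fail with EINTR and must simply be retried.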
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(*popenargs, stdout=PIPE, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
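    For example (illustrative), applying the rules above:
        list2cmdline(["a b", "c"]) produces the string '"a b" c'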
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for POSIX.
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
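    The status 256 above is a raw wait() status: the exit code lives in the
    high byte, so os.WEXITSTATUS(256) == 1.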
"""
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if bufsize is None:
bufsize = 0 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if self.universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting
for f in filter(None, [self.stdin, self.stdout, self.stderr]):
try:
f.close()
except EnvironmentError:
# Ignore EBADF or other errors
pass
raise
def _translate_newlines(self, data, encoding):
data = data.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
return data.decode(encoding)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize, _active=_active):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not getattr(self, '_child_created', False):
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.daemon = True
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.daemon = True
stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
_subprocess.TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = _create_pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = _create_pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = _create_pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, str):
args = [args]
else:
args = list(args)
if shell:
args = ["/system/bin/sh", "-c"] + args # Android Hack 5-Apr-2012 ... there should be a more elegant approach.
if executable:
args[0] = executable
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = _create_pipe()
try:
try:
if _posixsubprocess:
                        # We must avoid complex work that could involve
                        # malloc or free in the child process to prevent
                        # potential deadlocks, thus we do all this here
                        # and pass the results to fork_exec().
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
else:
# Pure Python implementation: It is not thread safe.
# This implementation may deadlock in the child if your
# parent process has any other threads running.
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. See issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite != -1:
os.close(p2cwrite)
if c2pread != -1:
os.close(c2pread)
if errread != -1:
os.close(errread)
os.close(errpipe_read)
                                # When duping fds, if a pipe fd already
                                # occupies one of the standard slots (0, 1
                                # or 2), it could be overwritten by an
                                # earlier dup2(), so move it out of the way
                                # first (#12607).
if c2pwrite == 0:
c2pwrite = os.dup(c2pwrite)
if errwrite == 0 or errwrite == 1:
errwrite = os.dup(errwrite)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
_set_cloexec(a, False)
elif a != -1:
os.dup2(a, b)
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = set()
for fd in [p2cread, c2pwrite, errwrite]:
if fd > 2 and fd not in closed:
os.close(fd)
closed.add(fd)
# Close all other fds, if asked for
if close_fds:
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self._close_fds(fds_to_keep)
if cwd is not None:
os.chdir(cwd)
# This is a copy of Python/pythonrun.c
# _Py_RestoreSignals(). If that were exposed
# as a sys._py_restoresignals func it would be
                                # better, but this pure Python implementation
# isn't likely to be used much anymore.
if restore_signals:
signals = ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ')
for sig in signals:
if hasattr(signal, sig):
signal.signal(getattr(signal, sig),
signal.SIG_DFL)
if start_new_session and hasattr(os, 'setsid'):
os.setsid()
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
try:
exc_type, exc_value = sys.exc_info()[:2]
if isinstance(exc_value, OSError):
errno_num = exc_value.errno
else:
errno_num = 0
message = '%s:%x:%s' % (exc_type.__name__,
errno_num, exc_value)
message = message.encode(errors="surrogatepass")
os.write(errpipe_write, message)
except Exception:
# We MUST not allow anything odd happening
# above to prevent us from exiting below.
pass
# This exitcode won't be reported to applications
# so it really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread != -1 and p2cwrite != -1:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1:
os.close(c2pwrite)
if errwrite != -1 and errread != -1:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
data += part
if not part or len(data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = data.split(b':', 2)
except ValueError:
print('Bad exception data:', repr(data))
exception_name = b'RuntimeError'
hex_errno = b'0'
err_msg = b'Unknown'
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
RuntimeError)
for fd in (p2cwrite, c2pread, errread):
if fd != -1:
os.close(fd)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
err_msg += ': ' + repr(args[0])
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
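        # Decode the 16-bit wait status from os.waitpid(): death by signal N
        # maps to returncode -N, a normal exit maps to its exit status.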
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
            # This method is called (indirectly) by __del__, so it cannot
            # refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
sts = 0
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
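            # Event loop: poll until every registered pipe has been closed.
            # Writable events feed input in _PIPE_BUF-sized chunks; readable
            # events drain stdout/stderr into the per-fd output lists.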
while fd2file:
try:
ready = poller.poll()
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin and input:
write_set.append(self.stdin)
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
if self.stdin in wlist:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
write_set.remove(self.stdin)
else:
raise
else:
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if not data:
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if not data:
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print("Process list:")
print(plist)
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print("Looking for 'hda'...")
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print(repr(p2.communicate()[0]))
#
# Example 4: Catch execution error
#
print()
print("Trying a weird file...")
try:
print(Popen(["/this/path/does/not/exist"]).communicate())
except OSError as e:
if e.errno == errno.ENOENT:
print("The file didn't exist. I thought so...")
print("Child traceback:")
print(e.child_traceback)
else:
print("Error", e.errno)
else:
print("Gosh. No error.", file=sys.stderr)
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print("Looking for 'PROMPT' in set output...")
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print(repr(p2.communicate()[0]))
#
# Example 2: Simple execution of program
#
print("Executing calc...")
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
|
flashycud/timestack | refs/heads/master | django/__init__.py | 76 | VERSION = (1, 3, 0, 'beta', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s %s' % (version, VERSION[3], VERSION[4])
from django.utils.version import get_svn_revision
svn_rev = get_svn_revision()
if svn_rev != u'SVN-unknown':
version = "%s %s" % (version, svn_rev)
return version
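# For the VERSION tuple above this yields '1.3 beta 1', with the SVN revision
# appended when get_svn_revision() can determine one.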
|
jacshfr/mozilla-bedrock | refs/heads/master | bedrock/newsletter/tests/test_misc.py | 25 | import mock
from django.test.utils import override_settings
from basket import BasketException, errors
from bedrock.mozorg.tests import TestCase
from bedrock.newsletter.utils import get_newsletters, get_languages_for_newsletters
from bedrock.newsletter.tests import newsletters
cache_mock = mock.Mock()
cache_mock.get.return_value = None
newsletters_mock = mock.Mock()
newsletters_mock.return_value = newsletters
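# Shared mocks: the cache mock always misses, so the utils fall through to
# the (mocked) basket client; the newsletters mock returns the test fixture.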
@mock.patch('bedrock.newsletter.utils.cache', cache_mock)
class TestGetNewsletters(TestCase):
def test_simple_get(self):
# get_newsletters returns whatever it gets back from basket without
# changing it at all.
# Create a silly data structure to pass around
test_val = {'foo': {'zoo': 'zebra'}, 'bar': {'baz': 27}}
with mock.patch('basket.get_newsletters') as basket_get:
basket_get.return_value = test_val
result = get_newsletters()
self.assertEqual(test_val, result)
@mock.patch('basket.get_newsletters')
def test_get_newsletters_fallback(self, mock_basket_get_newsletters):
# if get_newsletters() cannot reach basket, it returns the
# newsletters from settings
mock_basket_get_newsletters.side_effect = BasketException(
'network error',
code=errors.BASKET_NETWORK_FAILURE,
)
default_value = mock.Mock()
with override_settings(DEFAULT_NEWSLETTERS=default_value):
return_value = get_newsletters()
self.assertEqual(default_value, return_value)
@mock.patch('bedrock.newsletter.utils.cache', cache_mock)
@mock.patch('bedrock.newsletter.utils.get_newsletters', newsletters_mock)
class TestGetNewsletterLanguages(TestCase):
def test_newsletter_langs(self):
"""Without args should return all langs."""
result = get_languages_for_newsletters()
good_set = set(['en', 'es', 'fr', 'de', 'pt', 'ru'])
self.assertSetEqual(good_set, result)
def test_single_newsletter_langs(self):
"""Should return languages for a single newsletter."""
result = get_languages_for_newsletters('join-mozilla')
good_set = set(['en', 'es'])
self.assertSetEqual(good_set, result)
def test_list_newsletter_langs(self):
"""Should return all languages for specified list of newsletters."""
result = get_languages_for_newsletters(['join-mozilla', 'beta'])
good_set = set(['en', 'es'])
self.assertSetEqual(good_set, result)
result = get_languages_for_newsletters(['firefox-tips', 'beta'])
good_set = set(['en', 'fr', 'de', 'pt', 'ru'])
self.assertSetEqual(good_set, result)
def test_works_with_bad_newsletter(self):
"""If given a bad newsletter name, should still return a set."""
result = get_languages_for_newsletters(['join-mozilla', 'eldudarino'])
good_set = set(['en', 'es'])
self.assertSetEqual(good_set, result)
|
leiferikb/bitpop | refs/heads/master | src/third_party/pywebsocket/src/test/test_endtoend.py | 449 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""End-to-end tests for pywebsocket. Tests standalone.py by default. You
can also test mod_pywebsocket hosted on an Apache server by setting
_use_external_server to True and modifying _external_server_port to point to
the port on which the Apache server is running.
"""
import logging
import os
import signal
import socket
import subprocess
import sys
import time
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import client_for_testing
from test import mux_client_for_testing
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_SERVER_WARMUP_IN_SEC = 0.2
# If you want to use external server to run end to end tests, set following
# parameters correctly.
_use_external_server = False
_external_server_port = 0
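# Example (illustrative): to test mod_pywebsocket hosted by Apache on port
# 8080, set _use_external_server = True and _external_server_port = 8080.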
# Test body functions
def _echo_check_procedure(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message('helloworld')
client.assert_receive('helloworld')
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_binary(client):
client.connect()
client.send_message('binary', binary=True)
client.assert_receive('binary', binary=True)
client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True)
client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True)
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_goodbye(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message(_GOODBYE_MESSAGE)
client.assert_receive(_GOODBYE_MESSAGE)
client.assert_receive_close()
client.send_close()
client.assert_connection_closed()
def _echo_check_procedure_with_code_and_reason(client, code, reason):
client.connect()
client.send_close(code, reason)
client.assert_receive_close(code, reason)
client.assert_connection_closed()
def _unmasked_frame_check_procedure(client):
client.connect()
client.send_message('test', mask=False)
client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '')
client.assert_connection_closed()
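# The mux procedure below grants a flow-control quota to the default logical
# channel (id 1), opens logical channels 2 and 3 over the same physical
# connection, echoes on both, and closes only those channels so the server
# does not start a closing handshake on the physical connection.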
def _mux_echo_check_procedure(mux_client):
mux_client.connect()
mux_client.send_flow_control(1, 1024)
logical_channel_options = client_for_testing.ClientOptions()
logical_channel_options.server_host = 'localhost'
logical_channel_options.server_port = 80
logical_channel_options.origin = 'http://localhost'
logical_channel_options.resource = '/echo'
mux_client.add_channel(2, logical_channel_options)
mux_client.send_flow_control(2, 1024)
mux_client.send_message(2, 'test')
mux_client.assert_receive(2, 'test')
mux_client.add_channel(3, logical_channel_options)
mux_client.send_flow_control(3, 1024)
mux_client.send_message(2, 'hello')
mux_client.send_message(3, 'world')
mux_client.assert_receive(2, 'hello')
mux_client.assert_receive(3, 'world')
# Don't send close message on channel id 1 so that server-initiated
# closing handshake won't occur.
mux_client.send_close(2)
mux_client.send_close(3)
mux_client.assert_receive_close(2)
mux_client.assert_receive_close(3)
mux_client.send_physical_connection_close()
mux_client.assert_physical_connection_receive_close()
class EndToEndTestBase(unittest.TestCase):
"""Base class for end-to-end tests that launch pywebsocket standalone
server as a separate process, connect to it using the client_for_testing
module, and check if the server behaves correctly by exchanging opening
handshake and frames over a TCP connection.
"""
def setUp(self):
self.server_stderr = None
self.top_dir = os.path.join(os.path.split(__file__)[0], '..')
os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path))
self.standalone_command = os.path.join(
self.top_dir, 'mod_pywebsocket', 'standalone.py')
self.document_root = os.path.join(self.top_dir, 'example')
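        # Bind to port 0 so the OS assigns a free ephemeral port, record it,
        # and release the socket; the test server is started on that port.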
s = socket.socket()
s.bind(('localhost', 0))
(_, self.test_port) = s.getsockname()
s.close()
self._options = client_for_testing.ClientOptions()
self._options.server_host = 'localhost'
self._options.origin = 'http://localhost'
self._options.resource = '/echo'
        # TODO(toyoshim): Eliminate launching a standalone server when an
        # external server is used.
if _use_external_server:
self._options.server_port = _external_server_port
else:
self._options.server_port = self.test_port
# TODO(tyoshino): Use tearDown to kill the server.
def _run_python_command(self, commandline, stdout=None, stderr=None):
return subprocess.Popen([sys.executable] + commandline, close_fds=True,
stdout=stdout, stderr=stderr)
def _run_server(self):
args = [self.standalone_command,
'-H', 'localhost',
'-V', 'localhost',
'-p', str(self.test_port),
'-P', str(self.test_port),
'-d', self.document_root]
# Inherit the level set to the root logger by test runner.
root_logger = logging.getLogger()
log_level = root_logger.getEffectiveLevel()
if log_level != logging.NOTSET:
args.append('--log-level')
args.append(logging.getLevelName(log_level).lower())
return self._run_python_command(args,
stderr=self.server_stderr)
def _kill_process(self, pid):
if sys.platform in ('win32', 'cygwin'):
subprocess.call(
('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True)
else:
os.kill(pid, signal.SIGKILL)
class EndToEndHyBiTest(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test_with_client_options(self, test_function, options):
server = self._run_server()
try:
# TODO(tyoshino): add some logic to poll the server until it
# becomes ready
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_test(self, test_function):
self._run_test_with_client_options(test_function, self._options)
def _run_deflate_frame_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.enable_deflate_frame()
client = client_for_testing.create_client(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_permessage_deflate_test(
self, offer, response_checker, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.extensions += offer
self._options.check_permessage_deflate = response_checker
client = client_for_testing.create_client(self._options)
try:
client.connect()
if test_function is not None:
test_function(client)
client.assert_connection_closed()
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_close_with_code_and_reason_test(self, test_function, code,
reason):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(self._options)
try:
test_function(client, code, reason)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_http_fallback_test(self, options, status):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
client.connect()
self.fail('Could not catch HttpStatusException')
except client_for_testing.HttpStatusException, e:
self.assertEqual(status, e.status)
            except Exception, e:
                self.fail('Caught an unexpected exception')
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_mux_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = mux_client_for_testing.MuxClient(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_binary(self):
self._run_test(_echo_check_procedure_with_binary)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
def test_unmasked_frame(self):
self._run_test(_unmasked_frame_check_procedure)
def test_echo_deflate_frame(self):
self._run_deflate_frame_test(_echo_check_procedure)
def test_echo_deflate_frame_server_close(self):
self._run_deflate_frame_test(
_echo_check_procedure_with_goodbye)
def test_echo_permessage_deflate(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_frames(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd',
client_for_testing.OPCODE_TEXT,
end=False,
rsv1=1)
client._stream.send_data(
'\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_messages(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x00\x11\x00\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_preference(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate', 'deflate-frame'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_parameters(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_max_window_bits', '10'),
('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=10; '
'server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=3000000'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_undefined_parameter(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; foo=bar'],
response_checker,
test_function)
def test_echo_close_with_code_and_reason(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun')
def test_echo_close_with_empty_body(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, None, '')
def test_mux_echo(self):
self._run_mux_test(_mux_echo_check_procedure)
def test_close_on_protocol_error(self):
"""Tests that the server sends a close frame with protocol error status
code when the client sends data with some protocol error.
"""
def test_function(client):
client.connect()
# Intermediate frame without any preceding start of fragmentation
# frame.
client.send_frame_of_arbitrary_bytes('\x80\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_PROTOCOL_ERROR)
self._run_test(test_function)
def test_close_on_unsupported_frame(self):
"""Tests that the server sends a close frame with unsupported operation
status code when the client sends data asking some operation that is
not supported by the server.
"""
def test_function(client):
client.connect()
# Text frame with RSV3 bit raised.
client.send_frame_of_arbitrary_bytes('\x91\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_UNSUPPORTED_DATA)
self._run_test(test_function)
def test_close_on_invalid_frame(self):
"""Tests that the server sends a close frame with invalid frame payload
data status code when the client sends an invalid frame like containing
invalid UTF-8 character.
"""
def test_function(client):
client.connect()
# Text frame with invalid UTF-8 string.
client.send_message('\x80', raw=True)
client.assert_receive_close(
client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA)
self._run_test(test_function)
def test_close_on_internal_endpoint_error(self):
"""Tests that the server sends a close frame with internal endpoint
error status code when the handler does bad operation.
"""
self._options.resource = '/internal_error'
def test_function(client):
client.connect()
client.assert_receive_close(
client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR)
self._run_test(test_function)
    # TODO(toyoshim): Add tests to verify handling of invalid absolute URIs,
    # e.g. host mismatch, port mismatch, and an invalid port description
    # (':' without a port number).
def test_absolute_uri(self):
"""Tests absolute uri request."""
options = self._options
options.resource = 'ws://localhost:%d/echo' % options.server_port
self._run_test_with_client_options(_echo_check_procedure, options)
def test_origin_check(self):
"""Tests http fallback on origin check fail."""
options = self._options
options.resource = '/origin_check'
        # The server logs a warning message on the http 403 fallback, which
        # is confusing in test output; piping stderr below discards it.
self.server_stderr = subprocess.PIPE
self._run_http_fallback_test(options, 403)
def test_version_check(self):
"""Tests http fallback on version check fail."""
options = self._options
options.version = 99
self._run_http_fallback_test(options, 400)
class EndToEndHyBi00Test(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client_hybi00(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
class EndToEndTestWithEchoClient(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _check_example_echo_client_result(
self, expected, stdoutdata, stderrdata):
actual = stdoutdata.decode("utf-8")
if actual != expected:
raise Exception('Unexpected result on example echo client: '
'%r (expected) vs %r (actual)' %
(expected, actual))
if stderrdata is not None:
raise Exception('Unexpected error message on example echo '
'client: %r' % stderrdata)
def test_example_echo_client(self):
"""Tests that the echo_client.py example can talk with the server."""
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client_command = os.path.join(
self.top_dir, 'example', 'echo_client.py')
# Expected output for the default messages.
default_expectation = ('Send: Hello\n' 'Recv: Hello\n'
u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n'
'Send close\n' 'Recv ack\n')
args = [client_command,
'-p', str(self._options.server_port)]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
# Process a big message for which the extended payload length is used.
# Handling the extended payload length accesses the ws_version
# attribute, so this also checks that ws_version is set correctly.
big_message = 'a' * 1024
args = [client_command,
'-p', str(self._options.server_port),
'-m', big_message]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' %
(big_message, big_message))
self._check_example_echo_client_result(
expected, stdoutdata, stderrdata)
# Test the permessage-deflate extension.
args = [client_command,
'-p', str(self._options.server_port),
'--use_permessage_deflate']
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
finally:
self._kill_process(server.pid)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
sahiljain/catapult | refs/heads/master | third_party/gsutil/third_party/boto/boto/cacerts/__init__.py | 260 | # Copyright 2010 Google Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
romain-dartigues/ansible | refs/heads/devel | test/units/modules/network/f5/test_bigip_qkview.py | 21 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_qkview import Parameters
from library.modules.bigip_qkview import ModuleManager
from library.modules.bigip_qkview import MadmLocationManager
from library.modules.bigip_qkview import BulkLocationManager
from library.modules.bigip_qkview import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_qkview import Parameters
from ansible.modules.network.f5.bigip_qkview import ModuleManager
from ansible.modules.network.f5.bigip_qkview import MadmLocationManager
from ansible.modules.network.f5.bigip_qkview import BulkLocationManager
from ansible.modules.network.f5.bigip_qkview import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
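# Usage sketch (illustrative; the fixture names below are hypothetical, but
# real fixtures live under test/units/modules/network/f5/fixtures):
#   data = load_fixture('some_fixture.json')  # JSON files come back parsed
#   text = load_fixture('notes.txt')          # non-JSON content stays a str
# Results are cached in fixture_data, so repeated loads are cheap.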
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
filename='foo.qkview',
asm_request_log=False,
max_file_size=1024,
complete_information=True,
exclude_core=True,
force=False,
exclude=['audit', 'secure'],
dest='/tmp/foo.qkview'
)
p = Parameters(params=args)
assert p.filename == 'foo.qkview'
assert p.asm_request_log is None
assert p.max_file_size == '-s 1024'
assert p.complete_information == '-c'
assert p.exclude_core == '-C'
assert p.force is False
assert len(p.exclude) == 2
assert 'audit' in p.exclude
assert 'secure' in p.exclude
assert p.dest == '/tmp/foo.qkview'
def test_module_asm_parameter(self):
args = dict(
asm_request_log=True,
)
p = Parameters(params=args)
assert p.asm_request_log == '-o asm-request-log'
class TestMadmLocationManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_qkview_default_options(self, *args):
set_module_args(dict(
dest='/tmp/foo.qkview',
server='localhost',
user='admin',
password='password'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = MadmLocationManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.execute_on_device = Mock(return_value=True)
tm._move_qkview_to_download = Mock(return_value=True)
tm._download_file = Mock(return_value=True)
tm._delete_qkview = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_less_than_14 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
with patch('os.path.exists') as mo:
mo.return_value = True
results = mm.exec_module()
assert results['changed'] is False
class TestBulkLocationManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_qkview_default_options(self, *args):
set_module_args(dict(
dest='/tmp/foo.qkview',
server='localhost',
user='admin',
password='password'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = BulkLocationManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.execute_on_device = Mock(return_value=True)
tm._move_qkview_to_download = Mock(return_value=True)
tm._download_file = Mock(return_value=True)
tm._delete_qkview = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_less_than_14 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
with patch('os.path.exists') as mo:
mo.return_value = True
results = mm.exec_module()
assert results['changed'] is False
|
jermowery/xos | refs/heads/master | xos/core/models/__init__.py | 1 | from .plcorebase import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager,PlModelMixIn
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service, Tenant, TenantWithContainer, CoarseTenant, ServicePrivilege, TenantRoot, TenantRootPrivilege, TenantRootRole, TenantPrivilege, TenantRole, Subscriber, Provider
from .service import ServiceAttribute, TenantAttribute, ServiceRole
from .tag import Tag
from .role import Role
from .site import Site, Deployment, DeploymentRole, DeploymentPrivilege, Controller, ControllerRole, ControllerSite, SiteDeployment,Diag
from .dashboard import DashboardView, ControllerDashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .site import ControllerManager, ControllerDeletionManager, ControllerLinkManager,ControllerLinkDeletionManager
from .flavor import Flavor
from .image import Image
from .slice import Slice, ControllerSlice
from .controlleruser import ControllerUser, ControllerSitePrivilege, ControllerSlicePrivilege
from .image import ImageDeployments, ControllerImages
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .credential import UserCredential,SiteCredential,SliceCredential
from .site import SiteRole
from .site import SitePrivilege
from .node import Node, NodeLabel
from .slicetag import SliceTag
from .instance import Instance
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, Port, NetworkTemplate, Router, NetworkSlice, ControllerNetwork, AddressPool
from .billing import Account, Invoice, Charge, UsableObject, Payment
from .program import Program
|
jhona22baz/blog-flask | refs/heads/master | python2.7/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2942 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
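# Illustrative note (not part of the original module): on Python 2,
# wrap_ord('a') returns 97 while wrap_ord(97) passes through unchanged; on
# Python 3, indexing bytes already yields ints, so callers of wrap_ord can
# treat both versions uniformly.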
|
takis/odoo | refs/heads/8.0 | openerp/tools/test_reports.py | 337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Helper functions for reports testing.
Please /do not/ import this file by default, but only explicitly call it
through the code of yaml tests.
"""
import openerp
import openerp.report
import openerp.tools as tools
import logging
from openerp.tools.safe_eval import safe_eval
from subprocess import Popen, PIPE
import os
import tempfile
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('openerp.tests')
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None, report_type=None):
""" Try to render a report <rname> with contents of ids
This function should also check for common pitfalls of reports.
"""
if data is None:
data = {}
if context is None:
context = {}
if rname.startswith('report.'):
rname_s = rname[7:]
else:
rname_s = rname
_test_logger.info(" - Trying %s.create(%r)", rname, ids)
res = openerp.report.render_report(cr, uid, ids, rname_s, data, context)
if not isinstance(res, tuple):
raise RuntimeError("Result of %s.create() should be a (data,format) tuple, now it is a %s" % \
(rname, type(res)))
(res_data, res_format) = res
if not res_data:
raise ValueError("Report %s produced an empty result!" % rname)
if tools.config['test_report_directory']:
file(os.path.join(tools.config['test_report_directory'], rname+ '.'+res_format), 'wb+').write(res_data)
_logger.debug("Have a %s report for %s, will examine it", res_format, rname)
if res_format == 'pdf':
if res_data[:5] != '%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
res_text = False
try:
fd, rfname = tempfile.mkstemp(suffix=res_format)
os.write(fd, res_data)
os.close(fd)
proc = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE)
stdout, stderr = proc.communicate()
res_text = tools.ustr(stdout)
os.unlink(rfname)
except Exception:
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if ('[[' in line) or ('[ [' in line):
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?
elif res_format == 'html':
pass
else:
_logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
return False
_test_logger.info(" + Report %s produced correctly.", rname)
return True
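# Usage sketch (hedged; the report name and ids are illustrative):
#   try_report(cr, uid, 'report.sale.order', [order_id], our_module='sale')
# renders the report, optionally dumps it under test_report_directory, and
# returns True only when the produced chunk passes the sanity checks above.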
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
wiz_data=None, wiz_buttons=None,
context=None, our_module=None):
"""Take an ir.action.act_window and follow it until a report is produced
:param action_id: the integer id of an action, or a reference to xml id
of the act_window (can search for [our_module.]xml_id)
:param active_model, active_ids: call the action as if it had been launched
from that model+ids (tree/form view action)
:param wiz_data: a dictionary of values to use in the wizard, if needed.
They will override (or complete) the default values of the
wizard form.
:param wiz_buttons: a list of button names, or button icon strings, which
should be preferred to press during the wizard.
Eg. 'OK' or 'gtk-print'
:param our_module: the name of the calling module (string), like 'account'
"""
if not our_module and isinstance(action_id, basestring):
if '.' in action_id:
our_module = action_id.split('.', 1)[0]
if context is None:
context = {}
else:
context = context.copy() # keep it local
# TODO context fill-up
registry = openerp.registry(cr.dbname)
def log_test(msg, *args):
_test_logger.info(" - " + msg, *args)
datas = {}
if active_model:
datas['model'] = active_model
if active_ids:
datas['ids'] = active_ids
if not wiz_buttons:
wiz_buttons = []
if isinstance(action_id, basestring):
if '.' in action_id:
act_module, act_xmlid = action_id.split('.', 1)
else:
if not our_module:
raise ValueError('You cannot specify action_id "%s" without a module name' % action_id)
act_module = our_module
act_xmlid = action_id
act_model, act_id = registry['ir.model.data'].get_object_reference(cr, uid, act_module, act_xmlid)
else:
assert isinstance(action_id, (long, int))
act_model = 'ir.action.act_window' # assume that
act_id = action_id
act_xmlid = '<%s>' % act_id
def _exec_action(action, datas, context):
# taken from client/modules/action/main.py:84 _exec_action()
if isinstance(action, bool) or 'type' not in action:
return
# Updating the context : Adding the context of action in order to use it on Views called from buttons
if datas.get('id',False):
context.update( {'active_id': datas.get('id',False), 'active_ids': datas.get('ids',[]), 'active_model': datas.get('model',False)})
context1 = action.get('context', {})
if isinstance(context1, basestring):
context1 = safe_eval(context1, context.copy())
context.update(context1)
if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
for key in ('res_id', 'res_model', 'view_type', 'view_mode',
'limit', 'auto_refresh', 'search_view', 'auto_search', 'search_view_id'):
datas[key] = action.get(key, datas.get(key, None))
view_id = False
if action.get('views', []):
if isinstance(action['views'],list):
view_id = action['views'][0][0]
datas['view_mode']= action['views'][0][1]
else:
if action.get('view_id', False):
view_id = action['view_id'][0]
elif action.get('view_id', False):
view_id = action['view_id'][0]
assert datas['res_model'], "Cannot use the view without a model"
# Here, we have a view that we need to emulate
log_test("will emulate a %s view: %s#%s",
action['view_type'], datas['res_model'], view_id or '?')
view_res = registry[datas['res_model']].fields_view_get(cr, uid, view_id, action['view_type'], context)
assert view_res and view_res.get('arch'), "Did not return any arch for the view"
view_data = {}
if view_res.get('fields',{}).keys():
view_data = registry[datas['res_model']].default_get(cr, uid, view_res['fields'].keys(), context)
if datas.get('form'):
view_data.update(datas.get('form'))
if wiz_data:
view_data.update(wiz_data)
_logger.debug("View data is: %r", view_data)
for fk, field in view_res.get('fields',{}).items():
# Default fields returns list of int, while at create()
# we need to send a [(6,0,[int,..])]
if field['type'] in ('one2many', 'many2many') \
and view_data.get(fk, False) \
and isinstance(view_data[fk], list) \
and not isinstance(view_data[fk][0], tuple) :
view_data[fk] = [(6, 0, view_data[fk])]
action_name = action.get('name')
try:
from xml.dom import minidom
cancel_found = False
buttons = []
dom_doc = minidom.parseString(view_res['arch'])
if not action_name:
action_name = dom_doc.documentElement.getAttribute('name')
for button in dom_doc.getElementsByTagName('button'):
button_weight = 0
if button.getAttribute('special') == 'cancel':
cancel_found = True
continue
if button.getAttribute('icon') == 'gtk-cancel':
cancel_found = True
continue
if button.getAttribute('default_focus') == '1':
button_weight += 20
if button.getAttribute('string') in wiz_buttons:
button_weight += 30
elif button.getAttribute('icon') in wiz_buttons:
button_weight += 10
string = button.getAttribute('string') or '?%s' % len(buttons)
buttons.append( { 'name': button.getAttribute('name'),
'string': string,
'type': button.getAttribute('type'),
'weight': button_weight,
})
except Exception, e:
_logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
raise AssertionError(e.args[0])
if not datas['res_id']:
# it is probably an orm_memory object, we need to create
# an instance
datas['res_id'] = registry[datas['res_model']].create(cr, uid, view_data, context)
if not buttons:
raise AssertionError("view form doesn't have any buttons to press!")
buttons.sort(key=lambda b: b['weight'])
_logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
res = None
while buttons and not res:
b = buttons.pop()
log_test("in the \"%s\" form, I will press the \"%s\" button.", action_name, b['string'])
if not b['type']:
log_test("the \"%s\" button has no type, cannot use it", b['string'])
continue
if b['type'] == 'object':
#there we are! press the button!
fn = getattr(registry[datas['res_model']], b['name'])
if not fn:
_logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
continue
res = fn(cr, uid, [datas['res_id'],], context)
break
else:
_logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
action_name, b['string'], b['type'])
return res
elif action['type']=='ir.actions.report.xml':
if 'window' in datas:
del datas['window']
if not datas:
datas = action.get('datas')
if not datas:
datas = action.get('data')
datas = datas.copy()
ids = datas.get('ids')
if 'ids' in datas:
del datas['ids']
res = try_report(cr, uid, 'report.'+action['report_name'], ids, datas, context, our_module=our_module)
return res
else:
raise Exception("Cannot handle action of type %s" % act_model)
log_test("will be using %s action %s #%d", act_model, act_xmlid, act_id)
action = registry[act_model].read(cr, uid, [act_id], context=context)[0]
assert action, "Could not read action %s[%s]" %(act_model, act_id)
loop = 0
while action:
loop += 1
# This part tries to emulate the loop of the Gtk client
if loop > 100:
_logger.error("Passed %d loops, giving up", loop)
raise Exception("Too many loops at action")
log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
result = _exec_action(action, datas, context)
if not isinstance(result, dict):
break
datas = result.get('datas', {})
if datas:
del result['datas']
action = result
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aaldaber/owid-grapher | refs/heads/master | importer/edstats_importer.py | 1 | import sys
import os
import hashlib
import json
import logging
import requests
import unidecode
import shutil
import time
import zipfile
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import grapher_admin.wsgi
from openpyxl import load_workbook
from grapher_admin.models import Entity, DatasetSubcategory, DatasetCategory, Dataset, Source, Variable, VariableType, DataValue, ChartDimension
from importer.models import ImportHistory, AdditionalCountryInfo
from country_name_tool.models import CountryName
from django.conf import settings
from django.db import connection, transaction
from django.utils import timezone
from django.urls import reverse
from grapher_admin.views import write_dataset_csv
# we will use the file checksum to check if the downloaded file has changed since we last saw it
def file_checksum(filename, blocksize=2**20):
m = hashlib.md5()
with open(filename, "rb") as f:
while True:
buffer = f.read(blocksize)
if not buffer:
break
m.update(buffer)
return m.hexdigest()
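# Usage sketch (hedged; mirrors how the hash is consumed further below):
#   stored = json.loads(last_import.import_state)['file_hash']
#   if stored == file_checksum(edstats_downloads_save_location + 'edstats.zip'):
#       sys.exit('No updates available.')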
def short_unit_extract(unit: str):
common_short_units = ['$', '£', '€', '%'] # used for extracting short forms of units of measurement
short_unit = None
if unit:
if ' per ' in unit:
short_form = unit.split(' per ')[0]
if any(w in short_form for w in common_short_units):
for x in common_short_units:
if x in short_form:
short_unit = x
break
else:
short_unit = short_form
elif any(x in unit for x in common_short_units):
for y in common_short_units:
if y in unit:
short_unit = y
break
elif 'percentage' in unit:
short_unit = '%'
elif 'percent' in unit.lower():
short_unit = '%'
elif len(unit) < 9: # this length is sort of arbitrary at this point, taken from the unit 'hectares'
short_unit = unit
return short_unit
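# Worked examples (derived from the branches above):
#   short_unit_extract('US$ per person')    -> '$'  (short form before ' per ')
#   short_unit_extract('% of GDP')          -> '%'  (common short unit found)
#   short_unit_extract('percent of pupils') -> '%'  ('percent' keyword)
#   short_unit_extract('hectares')          -> 'hectares'  (len < 9 fallback)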
source_description = {
'dataPublishedBy': "World Bank EdStats",
'link': "https://data.worldbank.org/data-catalog/ed-stats",
'retrievedDate': timezone.now().strftime("%d-%B-%y")
}
edstats_zip_file_url = 'http://databank.worldbank.org/data/download/EdStats_excel.zip'
edstats_downloads_save_location = settings.BASE_DIR + '/data/edstats_downloads/'
# create a directory for holding the downloads if it does not already exist
# (the delete-and-recreate branch below is currently commented out)
if not os.path.exists(edstats_downloads_save_location):
os.makedirs(edstats_downloads_save_location)
#else:
# shutil.rmtree(edstats_downloads_save_location)
# os.makedirs(edstats_downloads_save_location)
logger = logging.getLogger('importer')
start_time = time.time()
logger.info("Getting the zip file")
request_header = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
r = requests.get(edstats_zip_file_url, stream=True, headers=request_header)
if r.ok:
with open(edstats_downloads_save_location + 'edstats.zip', 'wb') as out_file:
shutil.copyfileobj(r.raw, out_file)
logger.info("Saved the zip file to disk.")
z = zipfile.ZipFile(edstats_downloads_save_location + 'edstats.zip')
excel_filename = edstats_downloads_save_location + z.namelist()[0] # there should be only one file inside the zipfile, so we will load that one
z.extractall(edstats_downloads_save_location)
r = None # we do not need the request anymore
logger.info("Successfully extracted the zip file")
else:
logger.error("The file could not be downloaded. Stopping the script...")
sys.exit("Could not download file.")
edstats_category_name_in_db = 'World Bank EdStats' # set the name of the root category of all data that will be imported by this script
import_history = ImportHistory.objects.filter(import_type='edstats')
#excel_filename = edstats_downloads_save_location + "WDIEXCEL.xlsx"
with transaction.atomic():
# if edstats imports were never performed
if not import_history:
logger.info("This is the very first EdStats data import.")
wb = load_workbook(excel_filename, read_only=True)
series_ws = wb['Series']
data_ws = wb['Data']
country_ws = wb['Country']
column_number = 0 # this will be reset to 0 on each new row
row_number = 0 # this will be reset to 0 if we switch to another worksheet, or start reading the worksheet from the beginning one more time
global_cat = {} # global catalog of indicators
# data in the worksheets is not loaded into memory at once, that causes RAM to quickly fill up
# instead, we go through each row and cell one-by-one, looking at each piece of data separately
# this has the disadvantage of needing to traverse the worksheet several times, if we need to look up some rows/cells again
for row in series_ws.rows:
row_number += 1
for cell in row:
if row_number > 1:
column_number += 1
if column_number == 1:
global_cat[cell.value.upper().strip()] = {}
indicatordict = global_cat[cell.value.upper().strip()]
if column_number == 2:
indicatordict['category'] = cell.value
if column_number == 3:
indicatordict['name'] = cell.value
if column_number == 5:
indicatordict['description'] = cell.value
if column_number == 6:
if cell.value:
indicatordict['unitofmeasure'] = cell.value
else:
if '(' not in indicatordict['name']:
indicatordict['unitofmeasure'] = ''
else:
indicatordict['unitofmeasure'] = indicatordict['name'][
indicatordict['name'].rfind('(') + 1:indicatordict[
'name'].rfind(')')]
if column_number == 11:
if cell.value:
indicatordict['limitations'] = cell.value
else:
indicatordict['limitations'] = ''
if column_number == 12:
if cell.value:
indicatordict['sourcenotes'] = cell.value
else:
indicatordict['sourcenotes'] = ''
if column_number == 13:
if cell.value:
indicatordict['comments'] = cell.value
else:
indicatordict['comments'] = ''
if column_number == 14:
indicatordict['source'] = cell.value
if column_number == 15:
if cell.value:
indicatordict['concept'] = cell.value
else:
indicatordict['concept'] = ''
if column_number == 17:
if cell.value:
indicatordict['sourcelinks'] = cell.value
else:
indicatordict['sourcelinks'] = ''
if column_number == 18:
if cell.value:
indicatordict['weblinks'] = cell.value
else:
indicatordict['weblinks'] = ''
indicatordict['saved'] = False
column_number = 0
category_vars = {} # categories and their corresponding variables
for key, value in global_cat.items():
if value['category'] in category_vars:
category_vars[value['category']].append(key)
else:
category_vars[value['category']] = []
category_vars[value['category']].append(key)
existing_categories = DatasetCategory.objects.values('name')
existing_categories_list = {item['name'] for item in existing_categories}
if edstats_category_name_in_db not in existing_categories_list:
the_category = DatasetCategory(name=edstats_category_name_in_db, fetcher_autocreated=True)
the_category.save()
logger.info("Inserting a category %s." % edstats_category_name_in_db.encode('utf8'))
else:
the_category = DatasetCategory.objects.get(name=edstats_category_name_in_db)
existing_subcategories = DatasetSubcategory.objects.filter(categoryId=the_category.pk).values('name')
existing_subcategories_list = {item['name'] for item in existing_subcategories}
edstats_categories_list = []
for key, value in category_vars.items():
edstats_categories_list.append(key)
if key not in existing_subcategories_list:
the_subcategory = DatasetSubcategory(name=key, categoryId=the_category)
the_subcategory.save()
logger.info("Inserting a subcategory %s." % key.encode('utf8'))
existing_entities = Entity.objects.values('name')
existing_entities_list = {item['name'] for item in existing_entities}
country_tool_names = CountryName.objects.all()
country_tool_names_dict = {}
for each in country_tool_names:
country_tool_names_dict[each.country_name.lower()] = each.owid_country
country_name_entity_ref = {} # this dict will hold the country names from excel and the appropriate entity object (this is used when saving the variables and their values)
row_number = 0
for row in country_ws.rows:
row_number += 1
for cell in row:
if row_number > 1:
column_number += 1
if column_number == 1:
country_code = cell.value
if column_number == 3:
country_name = cell.value
if column_number == 7:
country_special_notes = cell.value
if column_number == 8:
country_region = cell.value
if column_number == 9:
country_income_group = cell.value
if column_number == 24:
country_latest_census = cell.value
if column_number == 25:
country_latest_survey = cell.value
if column_number == 26:
country_recent_income_source = cell.value
if column_number == 31:
entity_info = AdditionalCountryInfo()
entity_info.country_code = country_code
entity_info.country_name = country_name
entity_info.country_wb_region = country_region
entity_info.country_wb_income_group = country_income_group
entity_info.country_special_notes = country_special_notes
entity_info.country_latest_census = country_latest_census
entity_info.country_latest_survey = country_latest_survey
entity_info.country_recent_income_source = country_recent_income_source
entity_info.dataset = 'edstats'
entity_info.save()
if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
newentity = Entity.objects.get(name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
elif country_name in existing_entities_list:
newentity = Entity.objects.get(name=country_name)
else:
newentity = Entity(name=country_name, validated=False)
newentity.save()
logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
country_name_entity_ref[country_code] = newentity
column_number = 0
# this block of code is needed to insert the country British Virgin Islands with the code VGB
# without inserting this country name, the script will throw an error when reading the data values
# the EdStats file seems to be missing this country name and info in the Country worksheet
country_name = 'British Virgin Islands'
country_code = 'VGB'
if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
newentity = Entity.objects.get(
name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
elif country_name in existing_entities_list:
newentity = Entity.objects.get(name=country_name)
else:
newentity = Entity(name=country_name, validated=False)
newentity.save()
logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
country_name_entity_ref[country_code] = newentity
# end of VGB-related code block
insert_string = 'INSERT into data_values (value, year, entityId, variableId) VALUES (%s, %s, %s, %s)' # this is used for constructing the query for mass inserting to the data_values table
data_values_tuple_list = []
datasets_list = []
for category in edstats_categories_list:
newdataset = Dataset(name='World Bank EdStats - ' + category,
description='This is a dataset imported by the automated fetcher',
namespace='edstats', categoryId=the_category,
subcategoryId=DatasetSubcategory.objects.get(name=category, categoryId=the_category))
newdataset.save()
datasets_list.append(newdataset)
logger.info("Inserting a dataset %s." % newdataset.name.encode('utf8'))
row_number = 0
columns_to_years = {}
for row in data_ws.rows:
row_number += 1
data_values = []
for cell in row:
if row_number == 1:
column_number += 1
if cell.value:
try:
last_available_year = int(cell.value)
columns_to_years[column_number] = last_available_year
last_available_column = column_number
except:
pass
if row_number > 1:
column_number += 1
if column_number == 1:
country_name = cell.value
if column_number == 2:
country_code = cell.value
if column_number == 3:
indicator_name = cell.value
if column_number == 4:
indicator_code = cell.value.upper().strip()
if column_number > 4 and column_number <= last_available_column:
if cell.value or cell.value == 0:
data_values.append({'value': cell.value, 'year': columns_to_years[column_number]})
if column_number > 4 and column_number == last_available_column:
if len(data_values):
if indicator_code in category_vars[category]:
if not global_cat[indicator_code]['saved']:
source_description['additionalInfo'] = "Definitions and characteristics of countries and other territories: " + "https://ourworldindata.org" + reverse("serveedstatscountryinfo") + "\n"
source_description['additionalInfo'] += "Limitations and exceptions:\n" + global_cat[indicator_code]['limitations'] + "\n" if global_cat[indicator_code]['limitations'] else ''
source_description['additionalInfo'] += "Notes from original source:\n" + global_cat[indicator_code]['sourcenotes'] + "\n" if global_cat[indicator_code]['sourcenotes'] else ''
source_description['additionalInfo'] += "General comments:\n" + global_cat[indicator_code]['comments'] + "\n" if global_cat[indicator_code]['comments'] else ''
source_description['additionalInfo'] += "Statistical concept and methodology:\n" + global_cat[indicator_code]['concept'] + "\n" if global_cat[indicator_code]['concept'] else ''
source_description['additionalInfo'] += "Related source links:\n" + global_cat[indicator_code]['sourcelinks'] + "\n" if global_cat[indicator_code]['sourcelinks'] else ''
source_description['additionalInfo'] += "Other web links:\n" + global_cat[indicator_code]['weblinks'] + "\n" if global_cat[indicator_code]['weblinks'] else ''
source_description['dataPublisherSource'] = global_cat[indicator_code]['source']
newsource = Source(name='World Bank EdStats: ' + global_cat[indicator_code]['name'],
description=json.dumps(source_description),
datasetId=newdataset.pk)
newsource.save()
logger.info("Inserting a source %s." % newsource.name.encode('utf8'))
s_unit = short_unit_extract(global_cat[indicator_code]['unitofmeasure'])
newvariable = Variable(name=global_cat[indicator_code]['name'], unit=global_cat[indicator_code]['unitofmeasure'] if global_cat[indicator_code]['unitofmeasure'] else '', short_unit=s_unit, description=global_cat[indicator_code]['description'],
code=indicator_code, timespan='1970-' + str(last_available_year), datasetId=newdataset, variableTypeId=VariableType.objects.get(pk=4), sourceId=newsource)
newvariable.save()
logger.info("Inserting a variable %s." % newvariable.name.encode('utf8'))
global_cat[indicator_code]['variable_object'] = newvariable
global_cat[indicator_code]['saved'] = True
else:
newvariable = global_cat[indicator_code]['variable_object']
for i in range(0, len(data_values)):
data_values_tuple_list.append((data_values[i]['value'], data_values[i]['year'], country_name_entity_ref[country_code].pk, newvariable.pk))
if len(data_values_tuple_list) > 3000: # insert when the length of the list goes over 3000
with connection.cursor() as c:
c.executemany(insert_string, data_values_tuple_list)
logger.info("Dumping data values...")
data_values_tuple_list = []
column_number = 0
if row_number % 10 == 0:
time.sleep(0.001) # sleep 1 millisecond after every 10th row so the CPU is not kept busy all the time
if len(data_values_tuple_list): # insert any leftover data_values
with connection.cursor() as c:
c.executemany(insert_string, data_values_tuple_list)
logger.info("Dumping data values...")
newimport = ImportHistory(import_type='edstats', import_time=timezone.now().strftime('%Y-%m-%d %H:%M:%S'),
import_notes='Initial import of Edstats',
import_state=json.dumps({'file_hash': file_checksum(edstats_downloads_save_location + 'edstats.zip')}))
newimport.save()
for dataset in datasets_list:
write_dataset_csv(dataset.pk, dataset.name, None, 'edstats_fetcher', '')
logger.info("Import complete.")
else:
last_import = import_history.last()
deleted_indicators = {} # used to keep track of which variables' data values were already deleted before writing new values
if json.loads(last_import.import_state)['file_hash'] == file_checksum(edstats_downloads_save_location + 'edstats.zip'):
logger.info('No updates available.')
sys.exit('No updates available.')
logger.info('New data is available.')
available_variables = Variable.objects.filter(datasetId__in=Dataset.objects.filter(namespace='edstats'))
available_variables_list = []
for each in available_variables.values('code'):
available_variables_list.append(each['code'])
chart_dimension_vars = ChartDimension.objects.all().values('variableId').distinct()
chart_dimension_vars_list = {item['variableId'] for item in chart_dimension_vars}
existing_variables_ids = [item['id'] for item in available_variables.values('id')]
existing_variables_id_code = {item['id']: item['code'] for item in available_variables.values('id', 'code')}
existing_variables_code_id = {item['code']: item['id'] for item in available_variables.values('id', 'code')}
vars_being_used = [] # we will not be deleting any variables that are currently being used by charts
for each_var in existing_variables_ids:
if each_var in chart_dimension_vars_list:
vars_being_used.append(existing_variables_id_code[each_var])
wb = load_workbook(excel_filename, read_only=True)
series_ws = wb['Series']
data_ws = wb['Data']
country_ws = wb['Country']
column_number = 0 # this will be reset to 0 on each new row
row_number = 0 # this will be reset to 0 if we switch to another worksheet, or start reading the worksheet from the beginning one more time
global_cat = {} # global catalog of indicators
# data in the worksheets is not loaded into memory at once, that causes RAM to quickly fill up
# instead, we go through each row and cell one-by-one, looking at each piece of data separately
# this has the disadvantage of needing to traverse the worksheet several times, if we need to look up some rows/cells again
for row in series_ws.rows:
row_number += 1
for cell in row:
if row_number > 1:
column_number += 1
if column_number == 1:
global_cat[cell.value.upper().strip()] = {}
indicatordict = global_cat[cell.value.upper().strip()]
if column_number == 2:
indicatordict['category'] = cell.value
if column_number == 3:
indicatordict['name'] = cell.value
if column_number == 5:
indicatordict['description'] = cell.value
if column_number == 6:
if cell.value:
indicatordict['unitofmeasure'] = cell.value
else:
if '(' not in indicatordict['name']:
indicatordict['unitofmeasure'] = ''
else:
indicatordict['unitofmeasure'] = indicatordict['name'][
indicatordict['name'].rfind('(') + 1:indicatordict[
'name'].rfind(')')]
if column_number == 11:
if cell.value:
indicatordict['limitations'] = cell.value
else:
indicatordict['limitations'] = ''
if column_number == 12:
if cell.value:
indicatordict['sourcenotes'] = cell.value
else:
indicatordict['sourcenotes'] = ''
if column_number == 13:
if cell.value:
indicatordict['comments'] = cell.value
else:
indicatordict['comments'] = ''
if column_number == 14:
indicatordict['source'] = cell.value
if column_number == 15:
if cell.value:
indicatordict['concept'] = cell.value
else:
indicatordict['concept'] = ''
if column_number == 17:
if cell.value:
indicatordict['sourcelinks'] = cell.value
else:
indicatordict['sourcelinks'] = ''
if column_number == 18:
if cell.value:
indicatordict['weblinks'] = cell.value
else:
indicatordict['weblinks'] = ''
indicatordict['saved'] = False
column_number = 0
new_variables = []
for key, value in global_cat.items():
new_variables.append(key)
vars_to_add = list(set(new_variables).difference(available_variables_list))
newly_added_vars = list(set(new_variables).difference(available_variables_list))
vars_to_delete = list(set(available_variables_list).difference(new_variables))
for each in vars_to_delete:
if each not in vars_being_used:
logger.info("Deleting data values for the variable: %s" % each.encode('utf8'))
while DataValue.objects.filter(variableId__pk=existing_variables_code_id[each]).first():
with connection.cursor() as c: # if we don't limit the deleted values, the db might just hang
c.execute('DELETE FROM %s WHERE variableId = %s LIMIT 10000;' %
(DataValue._meta.db_table, existing_variables_code_id[each]))
source_object = Variable.objects.get(code=each, datasetId__in=Dataset.objects.filter(namespace='edstats')).sourceId
Variable.objects.get(code=each, datasetId__in=Dataset.objects.filter(namespace='edstats')).delete()
logger.info("Deleting the variable: %s" % each.encode('utf8'))
logger.info("Deleting the source: %s" % source_object.name.encode('utf8'))
source_object.delete()
category_vars = {} # categories and their corresponding variables
for key, value in global_cat.items():
if value['category'] in category_vars:
category_vars[value['category']].append(key)
else:
category_vars[value['category']] = []
category_vars[value['category']].append(key)
existing_categories = DatasetCategory.objects.values('name')
existing_categories_list = {item['name'] for item in existing_categories}
if edstats_category_name_in_db not in existing_categories_list:
the_category = DatasetCategory(name=edstats_category_name_in_db, fetcher_autocreated=True)
the_category.save()
logger.info("Inserting a category %s." % edstats_category_name_in_db.encode('utf8'))
else:
the_category = DatasetCategory.objects.get(name=edstats_category_name_in_db)
existing_subcategories = DatasetSubcategory.objects.filter(categoryId=the_category).values('name')
existing_subcategories_list = {item['name'] for item in existing_subcategories}
edstats_categories_list = []
for key, value in category_vars.items():
edstats_categories_list.append(key)
if key not in existing_subcategories_list:
the_subcategory = DatasetSubcategory(name=key, categoryId=the_category)
the_subcategory.save()
logger.info("Inserting a subcategory %s." % key.encode('utf8'))
cats_to_add = list(set(edstats_categories_list).difference(list(existing_subcategories_list)))
existing_entities = Entity.objects.values('name')
existing_entities_list = {item['name'] for item in existing_entities}
country_tool_names = CountryName.objects.all()
country_tool_names_dict = {}
for each in country_tool_names:
country_tool_names_dict[each.country_name.lower()] = each.owid_country
country_name_entity_ref = {} # this dict will hold the country names from excel and the appropriate entity object (this is used when saving the variables and their values)
AdditionalCountryInfo.objects.filter(dataset='edstats').delete() # We will load new additional country data now
row_number = 0
for row in country_ws.rows:
row_number += 1
for cell in row:
if row_number > 1:
column_number += 1
if column_number == 1:
country_code = cell.value
if column_number == 3:
country_name = cell.value
if column_number == 7:
country_special_notes = cell.value
if column_number == 8:
country_region = cell.value
if column_number == 9:
country_income_group = cell.value
if column_number == 24:
country_latest_census = cell.value
if column_number == 25:
country_latest_survey = cell.value
if column_number == 26:
country_recent_income_source = cell.value
if column_number == 31:
entity_info = AdditionalCountryInfo()
entity_info.country_code = country_code
entity_info.country_name = country_name
entity_info.country_wb_region = country_region
entity_info.country_wb_income_group = country_income_group
entity_info.country_special_notes = country_special_notes
entity_info.country_latest_census = country_latest_census
entity_info.country_latest_survey = country_latest_survey
entity_info.country_recent_income_source = country_recent_income_source
entity_info.dataset = 'edstats'
entity_info.save()
if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
newentity = Entity.objects.get(name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
elif country_name in existing_entities_list:
newentity = Entity.objects.get(name=country_name)
else:
newentity = Entity(name=country_name, validated=False)
newentity.save()
logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
country_name_entity_ref[country_code] = newentity
column_number = 0
# this block of code is needed to insert the country British Virgin Islands with the code VGB
# without inserting this country name, the script will throw an error when reading the data values
# the EdStats file seems to be missing this country name and info in their Country worksheet
country_name = 'British Virgin Islands'
country_code = 'VGB'
if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
newentity = Entity.objects.get(
name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
elif country_name in existing_entities_list:
newentity = Entity.objects.get(name=country_name)
else:
newentity = Entity(name=country_name, validated=False)
newentity.save()
logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
country_name_entity_ref[country_code] = newentity
# end of VGB-related code block
insert_string = 'INSERT into data_values (value, year, entityId, variableId) VALUES (%s, %s, %s, %s)' # this is used for constructing the query for mass inserting to the data_values table
data_values_tuple_list = []
total_values_tracker = 0
dataset_id_oldname_list = []
for category in edstats_categories_list:
if category in cats_to_add:
newdataset = Dataset(name='World Bank EdStats - ' + category,
description='This is a dataset imported by the automated fetcher',
namespace='edstats', categoryId=the_category,
subcategoryId=DatasetSubcategory.objects.get(name=category,
categoryId=the_category))
newdataset.save()
dataset_id_oldname_list.append({'id': newdataset.pk, 'newname': newdataset.name, 'oldname': None})
logger.info("Inserting a dataset %s." % newdataset.name.encode('utf8'))
else:
newdataset = Dataset.objects.get(name='World Bank EdStats - ' + category, categoryId=DatasetCategory.objects.get(
name=edstats_category_name_in_db))
dataset_id_oldname_list.append({'id': newdataset.pk, 'newname': newdataset.name, 'oldname': newdataset.name})
row_number = 0
columns_to_years = {}
for row in data_ws.rows:
row_number += 1
data_values = []
for cell in row:
if row_number == 1:
column_number += 1
if cell.value:
try:
last_available_year = int(cell.value)
columns_to_years[column_number] = last_available_year
last_available_column = column_number
except:
pass
if row_number > 1:
column_number += 1
if column_number == 1:
country_name = cell.value
if column_number == 2:
country_code = cell.value
if column_number == 3:
indicator_name = cell.value
if column_number == 4:
indicator_code = cell.value.upper().strip()
if column_number > 4 and column_number <= last_available_column:
if cell.value or cell.value == 0:
data_values.append({'value': cell.value, 'year': columns_to_years[column_number]})
if column_number > 4 and column_number == last_available_column:
if len(data_values):
if indicator_code in category_vars[category]:
total_values_tracker += len(data_values)
if indicator_code in vars_to_add:
source_description['additionalInfo'] = "Definitions and characteristics of countries and other territories: " + "https://ourworldindata.org" + reverse("serveedstatscountryinfo") + "\n"
source_description['additionalInfo'] += "Limitations and exceptions:\n" + global_cat[indicator_code]['limitations'] + "\n" if global_cat[indicator_code]['limitations'] else ''
source_description['additionalInfo'] += "Notes from original source:\n" + global_cat[indicator_code]['sourcenotes'] + "\n" if global_cat[indicator_code]['sourcenotes'] else ''
source_description['additionalInfo'] += "General comments:\n" + global_cat[indicator_code]['comments'] + "\n" if global_cat[indicator_code]['comments'] else ''
source_description['additionalInfo'] += "Statistical concept and methodology:\n" + global_cat[indicator_code]['concept'] + "\n" if global_cat[indicator_code]['concept'] else ''
source_description['additionalInfo'] += "Related source links:\n" + global_cat[indicator_code]['sourcelinks'] + "\n" if global_cat[indicator_code]['sourcelinks'] else ''
source_description['additionalInfo'] += "Other web links:\n" + global_cat[indicator_code]['weblinks'] + "\n" if global_cat[indicator_code]['weblinks'] else ''
source_description['dataPublisherSource'] = global_cat[indicator_code]['source']
newsource = Source(name='World Bank EdStats: ' + global_cat[indicator_code]['name'],
description=json.dumps(source_description),
datasetId=newdataset.pk)
newsource.save()
logger.info("Inserting a source %s." % newsource.name.encode('utf8'))
global_cat[indicator_code]['source_object'] = newsource
s_unit = short_unit_extract(global_cat[indicator_code]['unitofmeasure'])
newvariable = Variable(name=global_cat[indicator_code]['name'],
unit=global_cat[indicator_code]['unitofmeasure'] if
global_cat[indicator_code]['unitofmeasure'] else '',
short_unit=s_unit,
description=global_cat[indicator_code]['description'],
code=indicator_code,
timespan='1970-' + str(last_available_year),
datasetId=newdataset,
variableTypeId=VariableType.objects.get(pk=4),
sourceId=newsource)
newvariable.save()
global_cat[indicator_code]['variable_object'] = newvariable
vars_to_add.remove(indicator_code)
global_cat[indicator_code]['saved'] = True
logger.info("Inserting a variable %s." % newvariable.name.encode('utf8'))
else:
if not global_cat[indicator_code]['saved']:
newsource = Source.objects.get(name='World Bank EdStats: ' + Variable.objects.get(code=indicator_code, datasetId__in=Dataset.objects.filter(namespace='edstats')).name)
newsource.name = 'World Bank EdStats: ' + global_cat[indicator_code]['name']
source_description['additionalInfo'] = "Definitions and characteristics of countries and other territories: " + "https://ourworldindata.org" + reverse("serveedstatscountryinfo") + "\n"
source_description['additionalInfo'] += "Limitations and exceptions:\n" + global_cat[indicator_code]['limitations'] + "\n" if global_cat[indicator_code]['limitations'] else ''
source_description['additionalInfo'] += "Notes from original source:\n" + global_cat[indicator_code]['sourcenotes'] + "\n" if global_cat[indicator_code]['sourcenotes'] else ''
source_description['additionalInfo'] += "General comments:\n" + global_cat[indicator_code]['comments'] + "\n" if global_cat[indicator_code]['comments'] else ''
source_description['additionalInfo'] += "Statistical concept and methodology:\n" + global_cat[indicator_code]['concept'] + "\n" if global_cat[indicator_code]['concept'] else ''
source_description['additionalInfo'] += "Related source links:\n" + global_cat[indicator_code]['sourcelinks'] + "\n" if global_cat[indicator_code]['sourcelinks'] else ''
source_description['additionalInfo'] += "Other web links:\n" + global_cat[indicator_code]['weblinks'] + "\n" if global_cat[indicator_code]['weblinks'] else ''
source_description['dataPublisherSource'] = global_cat[indicator_code]['source']
newsource.description=json.dumps(source_description)
newsource.datasetId=newdataset.pk
newsource.save()
logger.info("Updating the source %s." % newsource.name.encode('utf8'))
s_unit = short_unit_extract(global_cat[indicator_code]['unitofmeasure'])
newvariable = Variable.objects.get(code=indicator_code, datasetId__in=Dataset.objects.filter(namespace='edstats'))
newvariable.name = global_cat[indicator_code]['name']
newvariable.unit=global_cat[indicator_code]['unitofmeasure'] if global_cat[indicator_code]['unitofmeasure'] else ''
newvariable.short_unit = s_unit
newvariable.description=global_cat[indicator_code]['description']
newvariable.timespan='1970-' + str(last_available_year)
newvariable.datasetId=newdataset
newvariable.sourceId=newsource
newvariable.save()
global_cat[indicator_code]['variable_object'] = newvariable
logger.info("Updating the variable %s." % newvariable.name.encode('utf8'))
global_cat[indicator_code]['saved'] = True
else:
newvariable = global_cat[indicator_code]['variable_object']
if indicator_code not in newly_added_vars:
if not deleted_indicators.get(indicator_code, 0):
while DataValue.objects.filter(variableId__pk=newvariable.pk).first():
with connection.cursor() as c:
c.execute(
'DELETE FROM %s WHERE variableId = %s LIMIT 10000;' %
(DataValue._meta.db_table, newvariable.pk))
deleted_indicators[indicator_code] = True
logger.info("Deleting data values for the variable %s." % indicator_code.encode('utf8'))
for i in range(0, len(data_values)):
data_values_tuple_list.append((data_values[i]['value'], data_values[i]['year'],
country_name_entity_ref[country_code].pk,
newvariable.pk))
if len(
data_values_tuple_list) > 3000: # insert when the length of the list goes over 3000
with connection.cursor() as c:
c.executemany(insert_string, data_values_tuple_list)
logger.info("Dumping data values...")
data_values_tuple_list = []
column_number = 0
if row_number % 10 == 0:
time.sleep(0.001) # sleep 1 millisecond after every 10th row so the CPU is not kept busy all the time
if len(data_values_tuple_list): # insert any leftover data_values
with connection.cursor() as c:
c.executemany(insert_string, data_values_tuple_list)
logger.info("Dumping data values...")
# now deleting subcategories and datasets that are empty (that don't contain any variables), if any
all_edstats_datasets = Dataset.objects.filter(namespace='edstats')
all_edstats_datasets_with_vars = Variable.objects.filter(datasetId__in=all_edstats_datasets).values(
'datasetId').distinct()
all_edstats_datasets_with_vars_dict = {item['datasetId'] for item in all_edstats_datasets_with_vars}
for each in all_edstats_datasets:
if each.pk not in all_edstats_datasets_with_vars_dict:
cat_to_delete = each.subcategoryId
logger.info("Deleting empty dataset %s." % each.name)
logger.info("Deleting empty category %s." % cat_to_delete.name)
each.delete()
cat_to_delete.delete()
newimport = ImportHistory(import_type='edstats', import_time=timezone.now().strftime('%Y-%m-%d %H:%M:%S'),
import_notes='Imported a total of %s data values.' % total_values_tracker,
import_state=json.dumps(
{'file_hash': file_checksum(edstats_downloads_save_location + 'edstats.zip')}))
newimport.save()
# now exporting csvs to the repo
for dataset in dataset_id_oldname_list:
write_dataset_csv(dataset['id'], dataset['newname'], dataset['oldname'], 'edstats_fetcher', '')
print("--- %s seconds ---" % (time.time() - start_time))
|
iascchen/eggduino-station | refs/heads/master | tests/paramsDemo.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
CMD_T = 'AB010105'
CMD_H = 'AB020105'
CMD_Q = 'AB030101'
CMD_T_STOP = 'AB0100'
CMD_H_STOP = 'AB0200'
CMD_Q_STOP = 'AB0300'
init_cmds = [CMD_T_STOP, CMD_H_STOP, CMD_Q_STOP, CMD_T, CMD_H, CMD_Q]
parser = argparse.ArgumentParser(description='For example AB0100,AB0200,AB0200,AB010105,AB020105,AB030101')
parser.add_argument('-c', '--cmds', help='delimited list input', type=str)
args = parser.parse_args()
if args.cmds is None:
args.cmds = ''
print(args)
params_cmds_list = args.cmds.upper().split(',')
if params_cmds_list != ['']:
init_cmds = params_cmds_list
print(init_cmds)
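# Example run (illustrative):
#   $ python paramsDemo.py -c ab0100,ab010105
# upper-cases and splits the input, so init_cmds becomes
# ['AB0100', 'AB010105']; with no -c argument the six defaults above are kept.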
|
ramanajee/phantomjs | refs/heads/master | src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_env_var_test.py | 233 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
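# Illustrative note (not part of the original file): for example,
# TestFlag('color', 'yes', 'auto') exports GTEST_COLOR=yes, invokes
# gtest_env_var_test_ with the flag name 'color' and expects it to print
# 'yes'; it then unsets the variable and expects the default 'auto'.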
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
if IS_WINDOWS:
TestFlag('catch_exceptions', '1', '0')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
|
dago/ansible-modules-core | refs/heads/devel | cloud/openstack/quantum_subnet.py | 45 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystoneclient are required'")
DOCUMENTATION = '''
---
module: quantum_subnet
version_added: "1.2"
short_description: Add/remove subnet from a network
description:
- Add/remove subnet from a network
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: True
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: True
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true
default: None
name:
description:
- The name of the subnet that should be created
required: true
default: None
cidr:
description:
- The CIDR representation of the subnet that should be assigned to the subnet
required: true
default: None
tenant_name:
description:
- The name of the tenant for whom the subnet should be created
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- DNS nameservers for this subnet, comma-separated
required: false
default: None
version_added: "1.4"
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should be allocated
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the virtual machines
required: false
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Create a subnet for a tenant with the specified subnet
- quantum_subnet: state=present login_username=admin login_password=admin
login_tenant_name=admin tenant_name=tenant1
network_name=network1 name=net1subnet cidr=192.168.0.0/24
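# Remove the same subnet again (illustrative, same hypothetical values as above)
- quantum_subnet: state=absent login_username=admin login_password=admin
login_tenant_name=admin tenant_name=tenant1
network_name=network1 name=net1subnet cidr=192.168.0.0/24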
'''
_os_keystone = None
_os_tenant_id = None
_os_network_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = " Error in connecting to neutron: %s" % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_subnet_id(module, neutron):
global _os_network_id
subnet_id = None
_os_network_id = _get_net_id(neutron, module)
if not _os_network_id:
module.fail_json(msg = "network id of network not found.")
else:
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception, e:
module.fail_json( msg = " Error in getting the subnet list:%s " % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _create_subnet(module, neutron):
neutron.format = 'json'
subnet = {
'name': module.params['name'],
'ip_version': module.params['ip_version'],
'enable_dhcp': module.params['enable_dhcp'],
'tenant_id': _os_tenant_id,
'gateway_ip': module.params['gateway_ip'],
'dns_nameservers': module.params['dns_nameservers'],
'network_id': _os_network_id,
'cidr': module.params['cidr'],
}
if module.params['allocation_pool_start'] and module.params['allocation_pool_end']:
allocation_pools = [
{
'start' : module.params['allocation_pool_start'],
'end' : module.params['allocation_pool_end']
}
]
subnet.update({'allocation_pools': allocation_pools})
if not module.params['gateway_ip']:
subnet.pop('gateway_ip')
if module.params['dns_nameservers']:
subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',')
else:
subnet.pop('dns_nameservers')
try:
new_subnet = neutron.create_subnet(dict(subnet=subnet))
except Exception, e:
module.fail_json(msg = "Failure in creating subnet: %s" % e.message)
return new_subnet['subnet']['id']
def _delete_subnet(module, neutron, subnet_id):
try:
neutron.delete_subnet(subnet_id)
except Exception, e:
module.fail_json( msg = "Error in deleting subnet: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
network_name = dict(required=True),
cidr = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
ip_version = dict(default='4', choices=['4', '6']),
enable_dhcp = dict(default='true', type='bool'),
gateway_ip = dict(default=None),
dns_nameservers = dict(default=None),
allocation_pool_start = dict(default=None),
allocation_pool_end = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
subnet_id = _create_subnet(module, neutron)
module.exit_json(changed = True, result = "Created" , id = subnet_id)
else:
module.exit_json(changed = False, result = "success" , id = subnet_id)
else:
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.exit_json(changed = False, result = "success")
else:
_delete_subnet(module, neutron, subnet_id)
module.exit_json(changed = True, result = "deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
|
ProgVal/Limnoria-test | refs/heads/debug-pypy-sqlite | plugins/PluginDownloader/test.py | 3 | ###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import shutil
from supybot.test import *
import supybot.utils.minisix as minisix
pluginsPath = '%s/test-plugins' % os.getcwd()
class PluginDownloaderTestCase(PluginTestCase):
plugins = ('PluginDownloader',)
config = {'supybot.directories.plugins': [pluginsPath]}
def setUp(self):
PluginTestCase.setUp(self)
try:
shutil.rmtree(pluginsPath)
except:
pass
os.mkdir(pluginsPath)
def tearDown(self):
try:
shutil.rmtree(pluginsPath)
finally:
PluginTestCase.tearDown(self)
def _testPluginInstalled(self, name):
assert os.path.isdir(pluginsPath + '/%s/' % name)
assert os.path.isfile(pluginsPath + '/%s/plugin.py' % name)
assert os.path.isfile(pluginsPath + '/%s/config.py' % name)
def testRepolist(self):
self.assertRegexp('repolist', '(.*, )?ProgVal(, .*)?')
self.assertRegexp('repolist', '(.*, )?quantumlemur(, .*)?')
self.assertRegexp('repolist ProgVal', '(.*, )?AttackProtector(, .*)?')
def testInstallProgVal(self):
self.assertError('plugindownloader install ProgVal Darcs')
self.assertNotError('plugindownloader install ProgVal AttackProtector')
self.assertError('plugindownloader install ProgVal Darcs')
self._testPluginInstalled('AttackProtector')
def testInstallQuantumlemur(self):
self.assertError('plugindownloader install quantumlemur AttackProtector')
self.assertNotError('plugindownloader install quantumlemur Listener')
self.assertError('plugindownloader install quantumlemur AttackProtector')
self._testPluginInstalled('Listener')
def testInstallStepnem(self):
self.assertNotError('plugindownloader install stepnem Freenode')
self._testPluginInstalled('Freenode')
def testInstallNanotubeBitcoin(self):
self.assertNotError('plugindownloader install nanotube-bitcoin GPG')
self._testPluginInstalled('GPG')
def testInstallMtughanWeather(self):
self.assertNotError('plugindownloader install mtughan-weather '
'WunderWeather')
self._testPluginInstalled('WunderWeather')
def testInstallSpiderDave(self):
self.assertNotError('plugindownloader install SpiderDave Pastebin')
self._testPluginInstalled('Pastebin')
def testInfo(self):
self.assertResponse('plugindownloader info ProgVal Twitter',
'Advanced Twitter plugin for Supybot, with capabilities '
'handling, and per-channel user account.')
if minisix.PY3:
def test_2to3(self):
self.assertRegexp('plugindownloader install SpiderDave Pastebin',
'convert')
self.assertNotError('load Pastebin')
if not network:
class PluginDownloaderTestCase(PluginTestCase):
pass
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
h4r5h1t/django-hauthy | refs/heads/hauthy | django/contrib/gis/geos/geometry.py | 82 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.mutable_list import ListMixin
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import (
ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w,
)
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif json_regex.match(geo_input):
# Handling GeoJSON input.
if not gdal.HAS_GDAL:
raise ValueError('Initializing geometry from JSON input requires GDAL.')
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT, EWKT, or HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, six.memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr and capi:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"EWKT is used for the string representation."
return self.ewkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(six.memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
# ### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
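# Illustrative note (not in the original module): the operators simply
# delegate to the named methods, e.g. for two overlapping squares
#   g1 = GEOSGeometry('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')
#   g2 = GEOSGeometry('POLYGON((1 1, 1 3, 3 3, 3 1, 1 1))')
#   (g1 & g2).equals(g1.intersection(g2))  # True, both give the 1x1 overlap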
# #### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
# #### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
# #### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
are empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
# #### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T****** (for a point and a curve, a point and an area, or a line and
an area) or 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
# #### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0:
return None
else:
return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
# #### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
are only included in this representation if GEOS >= 3.3.0.
"""
if self.get_srid():
return 'SRID=%s;%s' % (self.srid, self.wkt)
else:
return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w(3 if self.hasz else 2).write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(3 if self.hasz else 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes SRID value that are
a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL is installed.
"""
if gdal.HAS_GDAL:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported when GDAL is installed.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(3 if self.hasz else 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
value that are a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
return PreparedGeometry(self)
# #### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
if self.srid:
try:
return gdal.OGRGeometry(self.wkb, self.srid)
except SRSException:
pass
return gdal.OGRGeometry(self.wkb)
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to return a SpatialReference object.')
if self.srid:
try:
return gdal.SpatialReference(self.srid)
except SRSException:
pass
return None
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
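# Illustrative usage (not in the original module); assumes GDAL and the
# needed spatial reference data are available:
#   pnt = GEOSGeometry('SRID=4326;POINT(-104.609 38.255)')
#   pnt.transform(3857)                      # reprojects in place
#   clone = pnt.transform(4326, clone=True)  # returns a copy, pnt unchanged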
# #### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def interpolate(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def project(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
return capi.geos_project_normalized(self.ptr, point.ptr)
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => less points). If no
tolerance provided, defaults to 0.
By default, this function does not preserve topology - e.g. polygons can
be split, collapse to lines or disappear, holes can be created or
disappear, and lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
# #### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
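# Illustrative note (not in the original module):
#   GEOSGeometry('LINESTRING(0 0, 5 10)').extent == (0.0, 0.0, 5.0, 10.0)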
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing # isort:skip
from django.contrib.gis.geos.point import Point # isort:skip
from django.contrib.gis.geos.polygon import Polygon # isort:skip
from django.contrib.gis.geos.collections import ( # isort:skip
GeometryCollection, MultiPoint, MultiLineString, MultiPolygon)
from django.contrib.gis.geos.prepared import PreparedGeometry # isort:skip
GEOS_CLASSES = {
0: Point,
1: LineString,
2: LinearRing,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
}
|
zhangjunli177/sahara | refs/heads/master | sahara/topology/topology_helper.py | 4 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from oslo_config import cfg
from oslo_log import log
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.utils.openstack import base as b
from sahara.utils.openstack import nova
from sahara.utils import xmlutils as x
TOPOLOGY_CONFIG = {
"topology.node.switch.mapping.impl":
"org.apache.hadoop.net.ScriptBasedMapping",
"topology.script.file.name":
"/etc/hadoop/topology.sh"
}
LOG = log.getLogger(__name__)
opts = [
cfg.BoolOpt('enable_data_locality',
default=False,
help="Enables data locality for hadoop cluster. "
"Also enables data locality for Swift used by hadoop. "
"If enabled, 'compute_topology' and 'swift_topology' "
"configuration parameters should point to OpenStack and "
"Swift topology correspondingly."),
cfg.BoolOpt('enable_hypervisor_awareness',
default=True,
help="Enables four-level topology for data locality. "
"Works only if corresponding plugin supports such mode."),
cfg.StrOpt('compute_topology_file',
default='etc/sahara/compute.topology',
help="File with nova compute topology. "
"It should contain mapping between nova computes and "
"racks."),
cfg.StrOpt('swift_topology_file',
default='etc/sahara/swift.topology',
help="File with Swift topology."
"It should contain mapping between Swift nodes and "
"racks.")
]
CONF = cfg.CONF
CONF.register_opts(opts)
def _read_swift_topology():
LOG.debug("Reading Swift nodes topology from {config}".format(
config=CONF.swift_topology_file))
topology = {}
try:
with open(CONF.swift_topology_file) as f:
for line in f:
line = line.strip()
if not line:
continue
(host, path) = line.split()
topology[host] = path
except IOError:
LOG.warning(_LW("Unable to read Swift nodes topology from {config}")
.format(config=CONF.swift_topology_file))
return {}
return topology
def _read_compute_topology():
LOG.debug("Reading compute nodes topology from {config}".format(
config=CONF.compute_topology_file))
ctx = context.ctx()
tenant_id = str(ctx.tenant_id)
topology = {}
try:
with open(CONF.compute_topology_file) as f:
for line in f:
line = line.strip()
if not line:
continue
(host, path) = line.split()
# Calculating host id based on tenant id and host
# using the same algorithm as in nova
# see nova/api/openstack/compute/views/servers.py
# def _get_host_id(instance):
sha_hash = hashlib.sha224(tenant_id + host)
topology[sha_hash.hexdigest()] = path
except IOError:
raise ex.NotFoundException(
CONF.compute_topology_file,
_("Unable to find file %s with compute topology"))
return topology
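# Example topology file contents (hypothetical hosts and rack paths):
#   compute-host-1 /dc1/rack1
#   compute-host-2 /dc1/rack2
# Each non-empty line maps a host to its rack path; for the compute file the
# host name is combined with the tenant id into the nova hostId hash above.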
def generate_topology_map(cluster, is_node_awareness):
mapping = _read_compute_topology()
nova_client = nova.client()
topology_mapping = {}
for ng in cluster.node_groups:
for i in ng.instances:
# TODO(alazarev) get all servers info with one request
ni = b.execute_with_retries(nova_client.servers.get, i.instance_id)
hostId = ni.hostId
if hostId not in mapping:
raise ex.NotFoundException(
i.instance_id,
_("Was not able to find compute node topology for VM %s"))
rack = mapping[hostId]
if is_node_awareness:
rack += "/" + hostId
topology_mapping[i.instance_name] = rack
topology_mapping[i.management_ip] = rack
topology_mapping[i.internal_ip] = rack
topology_mapping.update(_read_swift_topology())
return topology_mapping
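# Illustrative result (hypothetical values): instance name and IPs all map
# to the same rack path, e.g.
#   {'cluster-worker-1': '/dc1/rack1', '10.0.0.5': '/dc1/rack1', ...}
# and with is_node_awareness=True the hostId is appended ('/dc1/rack1/<hostId>').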
def vm_awareness_core_config():
c = x.load_hadoop_xml_defaults('topology/resources/core-template.xml')
result = [cfg for cfg in c if cfg['value']]
if not CONF.enable_hypervisor_awareness:
# not leveraging 4-layer approach so override template value
param = next((prop for prop in result
if prop['name'] == 'net.topology.impl'), None)
if param:
param['value'] = 'org.apache.hadoop.net.NetworkTopology'
LOG.debug("Vm awareness will add following configs in core-site "
"params: {result}".format(result=result))
return result
def vm_awareness_mapred_config():
c = x.load_hadoop_xml_defaults('topology/resources/mapred-template.xml')
result = [cfg for cfg in c if cfg['value']]
LOG.debug("Vm awareness will add following configs in map-red "
"params: {result}".format(result=result))
return result
def vm_awareness_all_config():
return vm_awareness_core_config() + vm_awareness_mapred_config()
|
shaanlan/youtube-dl | refs/heads/master | youtube_dl/extractor/drtv.py | 112 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_iso8601,
)
class DRTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)'
_TEST = {
'url': 'https://www.dr.dk/tv/se/boern/ultra/panisk-paske/panisk-paske-5',
'md5': 'dc515a9ab50577fa14cc4e4b0265168f',
'info_dict': {
'id': 'panisk-paske-5',
'ext': 'mp4',
'title': 'Panisk Påske (5)',
'description': 'md5:ca14173c5ab24cd26b0fcc074dff391c',
'timestamp': 1426984612,
'upload_date': '20150322',
'duration': 1455,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>Programmet er ikke længere tilgængeligt' in webpage:
raise ExtractorError(
'Video %s is not available' % video_id, expected=True)
video_id = self._search_regex(
r'data-(?:material-identifier|episode-slug)="([^"]+)"',
webpage, 'video id')
programcard = self._download_json(
'http://www.dr.dk/mu/programcard/expanded/%s' % video_id,
video_id, 'Downloading video JSON')
data = programcard['Data'][0]
title = data['Title']
description = data['Description']
timestamp = parse_iso8601(data['CreatedTime'])
thumbnail = None
duration = None
restricted_to_denmark = False
formats = []
subtitles = {}
for asset in data['Assets']:
if asset['Kind'] == 'Image':
thumbnail = asset['Uri']
elif asset['Kind'] == 'VideoResource':
duration = asset['DurationInMilliseconds'] / 1000.0
restricted_to_denmark = asset['RestrictedToDenmark']
spoken_subtitles = asset['Target'] == 'SpokenSubtitles'
for link in asset['Links']:
uri = link['Uri']
target = link['Target']
format_id = target
preference = None
if spoken_subtitles:
preference = -1
format_id += '-spoken-subtitles'
if target == 'HDS':
formats.extend(self._extract_f4m_formats(
uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
video_id, preference, f4m_id=format_id))
elif target == 'HLS':
formats.extend(self._extract_m3u8_formats(
uri, video_id, 'mp4', preference=preference,
m3u8_id=format_id))
else:
bitrate = link.get('Bitrate')
if bitrate:
format_id += '-%s' % bitrate
formats.append({
'url': uri,
'format_id': format_id,
'tbr': bitrate,
'ext': link.get('FileFormat'),
})
subtitles_list = asset.get('SubtitlesList')
if isinstance(subtitles_list, list):
LANGS = {
'Danish': 'dk',
}
for subs in subtitles_list:
lang = subs['Language']
subtitles[LANGS.get(lang, lang)] = [{'url': subs['Uri'], 'ext': 'vtt'}]
if not formats and restricted_to_denmark:
raise ExtractorError(
'Unfortunately, DR is not allowed to show this program outside Denmark.', expected=True)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
|
holyangel/LGE_G3 | refs/heads/Redo | Documentation/target/tcm_mod_builder.py | 4981 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
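# Illustrative note (not part of the original script): for a hypothetical
# module "tcm_nab5000", proto_ident "FC" generates tcm_nab5000_base.h with
# lport/nport naming, while "SAS" and "iSCSI" use tport/iport, as set up by
# the helpers below.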
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
try:
    p.write(buf)
except IOError:
    tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
try:
    p = open(f, 'w')
except IOError:
    tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
try:
    pi = open(fi, 'w')
except IOError:
    tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
try:
    p.write(buf)
except IOError:
    tcm_mod_err("Unable to write f: " + f)
p.close()
try:
    pi.write(bufi)
except IOError:
    tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
try:
    p = open(f, 'w')
except IOError:
    tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
try:
    p.write(buf)
except IOError:
    tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
try:
    p = open(f, 'w')
except IOError:
    tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
try:
    p.write(buf)
except IOError:
    tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
sys.exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
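# Example invocation (a sketch; module and protocol names are illustrative):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This emits drivers/target/tcm_nab5000/ with the generated configfs and
# fabric skeleton sources plus a Makefile and Kconfig, and the prompts above
# optionally wire the new module into drivers/target/Makefile and Kconfig.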
|
spirrello/spirrello-pynet-work | refs/heads/master | applied_python/lib/python2.7/site-packages/pylint/test/input/func_block_disable_msg.py | 12 | # pylint: disable=C0302,bare-except,print-statement
"""pylint option block-disable"""
from __future__ import print_function
class Foo(object):
"""block-disable test"""
def __init__(self):
self._test = "42"
def meth1(self, arg):
"""this issues a message"""
print(self)
def meth2(self, arg):
"""and this one not"""
# pylint: disable=W0613
print(self._test\
+ "foo")
def meth3(self):
"""test one line disabling"""
# no error
print(self.bla) # pylint: disable=E1101
# error
print(self.blop)
def meth4(self):
"""test re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blop)
# pylint: enable=E1101
# error
print(self.blip)
def meth5(self):
"""test IF sub-block re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
if self.blop:
# pylint: enable=E1101
# error
print(self.blip)
else:
# no error
print(self.blip)
# no error
print(self.blip)
def meth6(self):
"""test TRY/EXCEPT sub-block re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
try:
# pylint: enable=E1101
# error
print(self.blip)
except UndefinedName: # pylint: disable=E0602
# no error
print(self.blip)
# no error
print(self.blip)
def meth7(self):
"""test one line block opening disabling"""
if self.blop: # pylint: disable=E1101
# error
print(self.blip)
else:
# error
print(self.blip)
# error
print(self.blip)
def meth8(self):
"""test late disabling"""
# error
print(self.blip)
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blop)
def meth9(self):
"""test re-enabling right after a block with whitespace"""
eris = 5
if eris: # pylint: disable=using-constant-test
print("In block")
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blu)
# pylint: enable=E1101
# error
print(self.blip)
def meth10(self):
"""Test double disable"""
# pylint: disable=E1101
# no error
print(self.bla)
# pylint: disable=E1101
print(self.blu)
class ClassLevelMessage(object):
"""shouldn't display to much attributes/not enough methods messages
"""
# pylint: disable=R0902,R0903
def __init__(self):
self.attr1 = 1
self.attr2 = 1
self.attr3 = 1
self.attr4 = 1
self.attr5 = 1
self.attr6 = 1
self.attr7 = 1
self.attr8 = 1
self.attr9 = 1
self.attr0 = 1
def too_complex_but_thats_ok(self, attr1, attr2):
"""THIS Method has too much branches and returns but i don't care
"""
# pylint: disable=R0912,R0911
try:
attr3 = attr1+attr2
except ValueError:
attr3 = None
except:
return 'duh', self
if attr1:
for i in attr1:
if attr2:
return i
else:
return 'duh'
elif attr2:
for i in attr2:
if attr2:
return i
else:
return 'duh'
else:
for i in range(15):
if attr3:
return i
else:
return 'doh'
return None
print('hop, too many lines but i don\'t care')
|
HBEE/odoo-addons | refs/heads/8.0 | sale_order_mail_product_attachment/sale.py | 3 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import netsvc
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, context)
partner_obj = self.pool.get('res.partner')
lang = partner_obj.browse(cr, uid, partner_id).lang
context_partner = {'lang': lang, 'partner_id': partner_id}
product_obj = self.pool.get('product.product')
attachment_obj = self.pool['ir.attachment']
if product:
product_obj = product_obj.browse(cr, uid, product, context=context_partner)
if not flag:
attachment_ids = attachment_obj.search(cr, uid, [('res_model','=','product.product'),('res_id','=',product)], context=context)
if attachment_ids:
attachment_desc = ', '.join([at.name for at in attachment_obj.browse(cr, uid, attachment_ids, context=context)])
res['value']['name'] += '\n' + _('See attachments: ') + attachment_desc
return res
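# Illustrative effect (hypothetical attachment names): with attachments
# "spec.pdf" and "notes.txt" on the selected product, the on-change result
# appends "\nSee attachments: spec.pdf, notes.txt" to the line description.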
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
destijl/forensicartifacts | refs/heads/master | frontend/thirdparty/networkx-1.9/networkx/generators/line.py | 8 | # -*- coding: ascii -*-
"""
Line graph algorithms.
Undirected Graphs
-----------------
For an undirected graph G without multiple edges, each edge can be written as
a set {u,v}. Its line graph L has the edges of G as its nodes. If x and y
are two nodes in L, then {x,y} is an edge in L if and only if the intersection
of x and y is nonempty. Thus, the set of all edges is determined by the
set of all pair-wise intersections of edges in G.
Trivially, every edge x={u,v} in G would have a nonzero intersection with
itself, and so every node in L should have a self-loop. This is not so
interesting, and the original context of line graphs was with simple graphs,
which had no self-loops or multiple edges. The line graph was also meant
to be simple graph and thus, self-loops in L are not part of the standard
definition of a line graph. In a pair-wise intersection matrix, this is
analogous to not including the diagonal as part of the line graph definition.
Self-loops and multiple edges in G add nodes to L in a natural way, and do not
require any fundamental changes to the definition. It might be argued that
the self-loops we excluded before should now be included. However, the
self-loops are still "trivial" in some sense and thus, are usually excluded.
Directed Graphs
---------------
For a directed graph G without multiple edges, each edge can be written as a
tuple (u,v). Its line graph L has the edges of G as its nodes. If x=(a,b) and
y=(c,d) are two nodes in L, then (x,y) is an edge in L if and only if the
tail of x matches the head of y---e.g., b=c.
Due to the directed nature of the edges, it is no longer the case that
every edge x=(u,v) should be connected to itself with a self-loop in L. Now,
the only time self-loops arise is if G itself has a self-loop. So such
self-loops are no longer "trivial" but instead, represent essential features
of the topology of G. For this reason, the historical development of line
digraphs is such that self-loops are included. When the graph G has multiple
edges, once again only superficial changes are required to the definition.
References
----------
Harary, Frank, and Norman, Robert Z., "Some properties of line digraphs",
Rend. Circ. Mat. Palermo, II. Ser. 9 (1960), 161--168.
Hemminger, R. L.; Beineke, L. W. (1978), "Line graphs and line digraphs",
in Beineke, L. W.; Wilson, R. J., Selected Topics in Graph Theory, Academic
Press Inc., pp. 271--305.
"""
# Copyright (C) 2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["Aric Hagberg (hagberg@lanl.gov)",
"Pieter Swart (swart@lanl.gov)",
"Dan Schult (dschult@colgate.edu)",
"chebee7i (chebee7i@gmail.com)"])
__all__ = ['line_graph']
import networkx as nx
def line_graph(G, create_using=None):
"""Return the line graph of the graph or digraph G.
The line graph of a graph G has a node for each edge in G and an edge
between those nodes if the two edges in G share a common node. For directed
graphs, nodes are connected only if they form a directed path of length 2.
The nodes of the line graph are 2-tuples of nodes in the original graph
(or 3-tuples for multigraphs, with the key of the edge as the 3rd element).
For more discussion, see the docstring in :mod:`networkx.generators.line`.
Parameters
----------
G : graph
A NetworkX Graph, DiGraph, MultiGraph, or MultiDigraph.
Returns
-------
L : graph
The line graph of G.
Examples
--------
>>> G = nx.star_graph(3)
>>> L = nx.line_graph(G)
>>> print(sorted(L.edges())) # makes a clique, K3
[((0, 1), (0, 2)), ((0, 1), (0, 3)), ((0, 3), (0, 2))]
Notes
-----
Graph, node, and edge data are not propagated to the new graph. For
undirected graphs, the nodes in G must be sortable---otherwise, the
constructed line graph may not be correct.
"""
if G.is_directed():
L = _lg_directed(G, create_using=create_using)
else:
L = _lg_undirected(G, selfloops=False, create_using=create_using)
return L
def _node_func(G):
"""Returns a function which returns a sorted node for line graphs.
When constructing a line graph for undirected graphs, we must normalize
the ordering of nodes as they appear in the edge.
"""
if G.is_multigraph():
def sorted_node(u, v, key):
return (u, v, key) if u <= v else (v, u, key)
else:
def sorted_node(u, v):
return (u, v) if u <= v else (v, u)
return sorted_node
def _edge_func(G):
"""Returns the edges from G, handling keys for multigraphs as necessary.
"""
if G.is_multigraph():
def get_edges(nbunch=None):
return G.edges_iter(nbunch, keys=True)
else:
def get_edges(nbunch=None):
return G.edges_iter(nbunch)
return get_edges
def _sorted_edge(u, v):
"""Returns a sorted edge.
During the construction of a line graph for undirected graphs, the data
structure can be a multigraph even though the line graph will never have
multiple edges between its nodes. For this reason, we must make sure not
to add any edge more than once. This requires that we build up a list of
edges to add and then remove all duplicates. And so, we must normalize
the representation of the edges.
"""
return (u, v) if u <= v else (v, u)
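# For example, _sorted_edge(2, 1) == _sorted_edge(1, 2) == (1, 2), so each
# undirected edge has a single canonical form and duplicates collapse when the
# candidate edges are accumulated in a set.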
def _lg_directed(G, create_using=None):
"""Return the line graph L of the (multi)digraph G.
Edges in G appear as nodes in L, represented as tuples of the form (u,v)
or (u,v,key) if G is a multidigraph. A node in L corresponding to the edge
(u,v) is connected to every node corresponding to an edge (v,w).
Parameters
----------
G : digraph
A directed graph or directed multigraph.
create_using : None
A digraph instance used to populate the line graph.
"""
if create_using is None:
L = G.__class__()
else:
L = create_using
# Create a graph specific edge function.
get_edges = _edge_func(G)
for from_node in get_edges():
# from_node is: (u,v) or (u,v,key)
L.add_node(from_node)
for to_node in get_edges(from_node[1]):
L.add_edge(from_node, to_node)
return L
def _lg_undirected(G, selfloops=False, create_using=None):
"""Return the line graph L of the (multi)graph G.
Edges in G appear as nodes in L, represented as sorted tuples of the form
(u,v), or (u,v,key) if G is a multigraph. A node in L corresponding to
the edge {u,v} is connected to every node corresponding to an edge that
involves u or v.
Parameters
----------
G : graph
An undirected graph or multigraph.
selfloops : bool
If `True`, then self-loops are included in the line graph. If `False`,
they are excluded.
create_using : None
A graph instance used to populate the line graph.
Notes
-----
The standard algorithm for line graphs of undirected graphs does not
produce self-loops.
"""
if create_using is None:
L = G.__class__()
else:
L = create_using
# Graph specific functions for edges and sorted nodes.
get_edges = _edge_func(G)
sorted_node = _node_func(G)
# Determine if we include self-loops or not.
shift = 0 if selfloops else 1
edges = set([])
for u in G:
# Label nodes as a sorted tuple of nodes in original graph.
nodes = [ sorted_node(*x) for x in get_edges(u) ]
if len(nodes) == 1:
# Then the edge will be an isolated node in L.
L.add_node(nodes[0])
# Add a clique of `nodes` to graph. To prevent double adding edges,
# especially important for multigraphs, we store the edges in
# canonical form in a set.
for i, a in enumerate(nodes):
edges.update([ _sorted_edge(a,b) for b in nodes[i+shift:] ])
L.add_edges_from(edges)
return L
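# A minimal usage sketch (not part of the upstream API), run only when this
# file is executed directly: for the 4-node path 0-1-2-3, the line graph has
# one node per original edge and one edge per pair of adjacent edges.
if __name__ == '__main__':
    P = nx.path_graph(4) # edges (0, 1), (1, 2), (2, 3)
    L = line_graph(P)
    print(sorted(L.nodes())) # [(0, 1), (1, 2), (2, 3)]
    print(L.number_of_edges()) # 2: (0,1)-(1,2) and (1,2)-(2,3)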
|
chrish42/pylearn | refs/heads/master | pylearn2/utils/tests/test_general.py | 45 | """
Tests for pylearn2.utils.general functions.
"""
from pylearn2.utils import contains_nan, contains_inf, isfinite
import numpy as np
def test_contains_nan():
"""
Tests that pylearn2.utils.contains_nan correctly
identifies `np.nan` values in an array.
"""
arr = np.random.random(100)
assert not contains_nan(arr)
arr[0] = np.nan
assert contains_nan(arr)
def test_contains_inf():
"""
Tests that pylearn2.utils.contains_inf correctly
identifies `np.inf` values in an array.
"""
arr = np.random.random(100)
assert not contains_inf(arr)
arr[0] = np.nan
assert not contains_inf(arr)
arr[1] = np.inf
assert contains_inf(arr)
arr[1] = -np.inf
assert contains_inf(arr)
def test_isfinite():
"""
Tests that pylearn2.utils.isfinite correctly
identifies `np.nan` and `np.inf` values in an array.
"""
arr = np.random.random(100)
assert isfinite(arr)
arr[0] = np.nan
assert not isfinite(arr)
arr[0] = np.inf
assert not isfinite(arr)
arr[0] = -np.inf
assert not isfinite(arr)
|
felliott/osf.io | refs/heads/develop | api_tests/files/views/test_file_detail.py | 6 | from __future__ import unicode_literals
import itsdangerous
import mock
import pytest
import pytz
from django.utils import timezone
from addons.base.utils import get_mfr_url
from addons.github.models import GithubFileNode
from addons.osfstorage import settings as osfstorage_settings
from addons.osfstorage.listeners import checkin_files_task
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from framework.auth.core import Auth
from osf.models import NodeLog, Session, QuickFilesNode
from osf.utils.permissions import WRITE, READ
from osf.utils.workflows import DefaultStates
from osf_tests.factories import (
AuthUserFactory,
CommentFactory,
ProjectFactory,
UserFactory,
PreprintFactory,
)
from website import settings as website_settings
# stolen from^W^Winspired by DRF
# rest_framework.fields.DateTimeField.to_representation
def _dt_to_iso8601(value):
iso8601 = value.isoformat()
if iso8601.endswith('+00:00'):
iso8601 = iso8601[:-6] + 'Z' # microsecond precision
return iso8601
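# e.g. (a sketch): a timezone-aware UTC value such as
# datetime.datetime(2017, 1, 1, tzinfo=pytz.utc) has isoformat()
# '2017-01-01T00:00:00+00:00', which this helper rewrites to
# '2017-01-01T00:00:00Z' to match DRF's rendering.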
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestFileView:
@pytest.fixture()
def node(self, user):
return ProjectFactory(creator=user, comment_level='public')
@pytest.fixture()
def quickfiles_node(self, user):
return QuickFilesNode.objects.get(creator=user)
@pytest.fixture()
def file(self, user, node):
return api_utils.create_test_file(node, user, create_guid=False)
@pytest.fixture()
def file_url(self, file):
return '/{}files/{}/'.format(API_BASE, file._id)
def test_must_have_auth_and_be_contributor(self, app, file_url):
# test_must_have_auth(self, app, file_url):
res = app.get(file_url, expect_errors=True)
assert res.status_code == 401
# test_must_be_contributor(self, app, file_url):
non_contributor = AuthUserFactory()
res = app.get(file_url, auth=non_contributor.auth, expect_errors=True)
assert res.status_code == 403
def test_deleted_file_return_410(self, app, node, user):
deleted_file = api_utils.create_test_file(node, user, create_guid=True)
url_with_guid = '/{}files/{}/'.format(
API_BASE, deleted_file.get_guid()._id
)
url_with_id = '/{}files/{}/'.format(API_BASE, deleted_file._id)
res = app.get(url_with_guid, auth=user.auth)
assert res.status_code == 200
res = app.get(url_with_id, auth=user.auth)
assert res.status_code == 200
deleted_file.delete(user=user, save=True)
res = app.get(url_with_guid, auth=user.auth, expect_errors=True)
assert res.status_code == 410
res = app.get(url_with_id, auth=user.auth, expect_errors=True)
assert res.status_code == 410
def test_disabled_users_quickfiles_file_detail_gets_410(self, app, quickfiles_node, user):
file_node = api_utils.create_test_file(quickfiles_node, user, create_guid=True)
url_with_guid = '/{}files/{}/'.format(
API_BASE, file_node.get_guid()._id
)
url_with_id = '/{}files/{}/'.format(API_BASE, file_node._id)
res = app.get(url_with_id)
assert res.status_code == 200
res = app.get(url_with_guid, auth=user.auth)
assert res.status_code == 200
user.is_disabled = True
user.save()
res = app.get(url_with_id, expect_errors=True)
assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \
' quickfiles are no longer available.'
assert res.status_code == 410
res = app.get(url_with_guid, expect_errors=True)
assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \
' quickfiles are no longer available.'
assert res.status_code == 410
def test_file_guid_guid_status(self, app, user, file, file_url):
# test_unvisited_file_has_no_guid
res = app.get(file_url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['guid'] is None
# test_visited_file_has_guid
guid = file.get_guid(create=True)
res = app.get(file_url, auth=user.auth)
assert res.status_code == 200
assert guid is not None
assert res.json['data']['attributes']['guid'] == guid._id
def test_file_with_wrong_guid(self, app, user):
url = '/{}files/{}/'.format(API_BASE, user._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
@mock.patch('api.base.throttling.CreateGuidThrottle.allow_request')
def test_file_guid_not_created_with_basic_auth(
self, mock_allow, app, user, file_url):
res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth)
guid = res.json['data']['attributes'].get('guid', None)
assert res.status_code == 200
assert mock_allow.call_count == 1
assert guid is None
@mock.patch('api.base.throttling.CreateGuidThrottle.allow_request')
def test_file_guid_created_with_cookie(
self, mock_allow, app, user, file_url, file):
session = Session(data={'auth_user_id': user._id})
session.save()
cookie = itsdangerous.Signer(
website_settings.SECRET_KEY
).sign(session._id)
app.set_cookie(website_settings.COOKIE_NAME, cookie.decode())
res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth)
app.reset() # clear cookie
assert res.status_code == 200
guid = res.json['data']['attributes'].get('guid', None)
assert guid is not None
assert guid == file.get_guid()._id
assert mock_allow.call_count == 1
def test_get_file(self, app, user, file_url, file):
res = app.get(file_url, auth=user.auth)
file.versions.first().reload()
assert res.status_code == 200
assert set(res.json.keys()) == {'meta', 'data'}
attributes = res.json['data']['attributes']
assert attributes['path'] == file.path
assert attributes['kind'] == file.kind
assert attributes['name'] == file.name
assert attributes['materialized_path'] == file.materialized_path
assert attributes['last_touched'] is None
assert attributes['provider'] == file.provider
assert attributes['size'] == file.versions.first().size
assert attributes['current_version'] == len(file.history)
assert attributes['date_modified'] == _dt_to_iso8601(
file.versions.first().created.replace(tzinfo=pytz.utc)
)
assert attributes['date_created'] == _dt_to_iso8601(
file.versions.last().created.replace(tzinfo=pytz.utc)
)
assert attributes['extra']['hashes']['md5'] is None
assert attributes['extra']['hashes']['sha256'] is None
assert attributes['tags'] == []
# make sure download link has a trailing slash
# so that downloads don't 301
assert res.json['data']['links']['download'].endswith('/')
def test_file_has_rel_link_to_owning_project(
self, app, user, file_url, node):
res = app.get(file_url, auth=user.auth)
assert res.status_code == 200
assert 'target' in res.json['data']['relationships'].keys()
expected_url = node.api_v2_url
actual_url = res.json['data']['relationships']['target']['links']['related']['href']
assert expected_url in actual_url
def test_file_has_comments_link(self, app, user, file, file_url):
file.get_guid(create=True)
res = app.get(file_url, auth=user.auth)
assert res.status_code == 200
assert 'comments' in res.json['data']['relationships'].keys()
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert app.get(url, auth=user.auth).status_code == 200
assert res.json['data']['type'] == 'files'
def test_file_has_correct_unread_comments_count(
self, app, user, file, node):
contributor = AuthUserFactory()
node.add_contributor(contributor, auth=Auth(user), save=True)
CommentFactory(
node=node,
target=file.get_guid(create=True),
user=contributor, page='files'
)
res = app.get(
'/{}files/{}/?related_counts=True'.format(API_BASE, file._id),
auth=user.auth
)
assert res.status_code == 200
unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
assert unread_comments == 1
def test_only_project_contrib_can_comment_on_closed_project(
self, app, user, node, file_url):
node.comment_level = 'private'
node.is_public = True
node.save()
res = app.get(file_url, auth=user.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert res.status_code == 200
assert can_comment is True
non_contributor = AuthUserFactory()
res = app.get(file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert res.status_code == 200
assert can_comment is False
def test_logged_or_not_user_comment_status_on_open_project(
self, app, node, file_url):
node.is_public = True
node.save()
# test_any_loggedin_user_can_comment_on_open_project(self, app, node,
# file_url):
non_contributor = AuthUserFactory()
res = app.get(file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert res.status_code == 200
assert can_comment is True
# test_non_logged_in_user_cant_comment(self, app, file_url, node):
res = app.get(file_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert res.status_code == 200
assert can_comment is False
def test_checkout(self, app, user, file, file_url, node):
assert file.checkout is None
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth)
file.reload()
file.save()
node.reload()
assert res.status_code == 200
assert file.checkout == user
res = app.get(file_url, auth=user.auth)
assert node.logs.count() == 2
assert node.logs.latest().action == NodeLog.CHECKED_OUT
assert node.logs.latest().user == user
assert user._id == res.json['data']['relationships']['checkout']['links']['related']['meta']['id']
assert '/{}users/{}/'.format(
API_BASE, user._id
) in res.json['data']['relationships']['checkout']['links']['related']['href']
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': None
}
}
}, auth=user.auth)
file.reload()
assert file.checkout is None
assert res.status_code == 200
def test_checkout_file_error(self, app, user, file_url, file):
# test_checkout_file_no_type
res = app.put_json_api(
file_url,
{'data': {'id': file._id, 'attributes': {'checkout': user._id}}},
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
# test_checkout_file_no_id
res = app.put_json_api(
file_url,
{'data': {'type': 'files', 'attributes': {'checkout': user._id}}},
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
# test_checkout_file_incorrect_type
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'Wrong type.',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_checkout_file_incorrect_id
res = app.put_json_api(
file_url, {
'data': {
'id': '12345',
'type': 'files',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_checkout_file_no_attributes
res = app.put_json_api(
file_url,
{'data': {'id': file._id, 'type': 'files'}},
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
def test_must_set_self(self, app, user, file, file_url):
user_unauthorized = UserFactory()
assert file.checkout is None
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': user_unauthorized._id
}
}
}, auth=user.auth, expect_errors=True, )
file.reload()
assert res.status_code == 400
assert file.checkout is None
def test_must_be_self(self, app, file, file_url):
user = AuthUserFactory()
file.checkout = user
file.save()
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth, expect_errors=True, )
file.reload()
assert res.status_code == 403
assert file.checkout == user
def test_admin_can_checkin(self, app, user, node, file, file_url):
user_unauthorized = UserFactory()
node.add_contributor(user_unauthorized)
file.checkout = user_unauthorized
file.save()
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': None
}
}
}, auth=user.auth, expect_errors=True, )
file.reload()
node.reload()
assert res.status_code == 200
assert file.checkout is None
assert node.logs.latest().action == NodeLog.CHECKED_IN
assert node.logs.latest().user == user
def test_admin_can_checkout(self, app, user, file_url, file, node):
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth, expect_errors=True, )
file.reload()
node.reload()
assert res.status_code == 200
assert file.checkout == user
assert node.logs.latest().action == NodeLog.CHECKED_OUT
assert node.logs.latest().user == user
def test_cannot_checkin_when_already_checked_in(
self, app, user, node, file, file_url):
count = node.logs.count()
assert not file.is_checked_out
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': None
}
}
}, auth=user.auth, expect_errors=True, )
file.reload()
node.reload()
assert res.status_code == 200
assert node.logs.count() == count
assert file.checkout is None
def test_cannot_checkout_when_checked_out(
self, app, user, node, file, file_url):
user_unauthorized = UserFactory()
node.add_contributor(user_unauthorized)
file.checkout = user_unauthorized
file.save()
count = node.logs.count()
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth, expect_errors=True, )
file.reload()
node.reload()
assert res.status_code == 200
assert file.checkout == user_unauthorized
assert node.logs.count() == count
def test_noncontrib_and_read_contrib_cannot_checkout(
self, app, file, node, file_url):
# test_noncontrib_cannot_checkout
non_contrib = AuthUserFactory()
assert file.checkout is None
assert not node.has_permission(non_contrib, READ)
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': non_contrib._id
}
}
}, auth=non_contrib.auth, expect_errors=True, )
file.reload()
node.reload()
assert res.status_code == 403
assert file.checkout is None
assert node.logs.latest().action != NodeLog.CHECKED_OUT
# test_read_contrib_cannot_checkout
read_contrib = AuthUserFactory()
node.add_contributor(read_contrib, permissions=READ)
node.save()
assert not node.can_edit(user=read_contrib)
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': None
}
}
}, auth=read_contrib.auth, expect_errors=True)
file.reload()
assert res.status_code == 403
assert file.checkout is None
assert node.logs.latest().action != NodeLog.CHECKED_OUT
def test_write_contrib_can_checkin(self, app, node, file, file_url):
write_contrib = AuthUserFactory()
node.add_contributor(write_contrib, permissions=WRITE)
node.save()
assert node.can_edit(user=write_contrib)
file.checkout = write_contrib
file.save()
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': None
}
}
}, auth=write_contrib.auth, )
file.reload()
assert res.status_code == 200
assert file.checkout is None
@mock.patch('addons.osfstorage.listeners.enqueue_postcommit_task')
def test_removed_contrib_files_checked_in(self, mock_enqueue, app, node, file):
write_contrib = AuthUserFactory()
node.add_contributor(write_contrib, permissions=WRITE)
node.save()
assert node.can_edit(user=write_contrib)
file.checkout = write_contrib
file.save()
assert file.is_checked_out
node.remove_contributor(write_contrib, auth=Auth(write_contrib))
mock_enqueue.assert_called_with(checkin_files_task, (node._id, write_contrib._id,), {}, celery=True)
def test_must_be_osfstorage(self, app, user, file, file_url):
file.recast(GithubFileNode._typedmodels_type)
file.save()
res = app.put_json_api(
file_url, {
'data': {
'id': file._id,
'type': 'files',
'attributes': {
'checkout': user._id
}
}
}, auth=user.auth, expect_errors=True, )
assert res.status_code == 403
def test_get_file_guids_misc(self, app, user, file, node):
# test_get_file_resolves_guids
guid = file.get_guid(create=True)
url = '/{}files/{}/'.format(API_BASE, guid._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert set(res.json.keys()) == {'meta', 'data'}
assert res.json['data']['attributes']['path'] == file.path
# test_get_file_invalid_guid_gives_404
url = '/{}files/{}/'.format(API_BASE, 'asdasasd')
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_get_file_non_file_guid_gives_404
url = '/{}files/{}/'.format(API_BASE, node._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_current_version_is_equal_to_length_of_history(
self, app, user, file_url, file):
res = app.get(file_url, auth=user.auth)
assert res.json['data']['attributes']['current_version'] == 1
for version in range(2, 4):
file.create_version(user, {
'object': '06d80e' + str(version),
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {'size': 1337,
'contentType': 'img/png'}).save()
res = app.get(file_url, auth=user.auth)
assert res.json['data']['attributes']['current_version'] == version
# Regression test for OSF-7758
def test_folder_files_relationships_contains_guid_not_id(
self, app, user, node):
folder = node.get_addon('osfstorage').get_root(
).append_folder('I\'d be a teacher!!')
folder.save()
folder_url = '/{}files/{}/'.format(API_BASE, folder._id)
res = app.get(folder_url, auth=user.auth)
split_href = res.json['data']['relationships']['files']['links']['related']['href'].split(
'/')
assert node._id in split_href
assert node.id not in split_href
def test_embed_user_on_quickfiles_detail(self, app, user):
quickfiles = QuickFilesNode.objects.get(creator=user)
osfstorage = quickfiles.get_addon('osfstorage')
root = osfstorage.get_root()
test_file = root.append_file('speedyfile.txt')
url = '/{}files/{}/?embed=user'.format(API_BASE, test_file._id)
res = app.get(url, auth=user.auth)
assert res.json['data'].get('embeds', None)
assert res.json['data']['embeds'].get('user')
assert res.json['data']['embeds']['user']['data']['id'] == user._id
@pytest.mark.django_db
class TestFileVersionView:
@pytest.fixture()
def node(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def osfstorage(self, node):
return node.get_addon('osfstorage')
@pytest.fixture()
def root_node(self, osfstorage):
return osfstorage.get_root()
@pytest.fixture()
def file(self, root_node, user):
file = root_node.append_file('test_file')
file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
return file
def test_listing(self, app, user, file):
file.create_version(user, {
'object': '0683m38e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1347,
'contentType': 'img/png'
}).save()
res = app.get(
'/{}files/{}/versions/'.format(API_BASE, file._id),
auth=user.auth,
)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['id'] == '2'
assert res.json['data'][0]['attributes']['name'] == file.name
assert res.json['data'][1]['id'] == '1'
assert res.json['data'][1]['attributes']['name'] == file.name
def test_load_and_property(self, app, user, file):
# test_by_id
res = app.get(
'/{}files/{}/versions/1/'.format(API_BASE, file._id),
auth=user.auth,
)
assert res.status_code == 200
assert res.json['data']['id'] == '1'
mfr_url = get_mfr_url(file, 'osfstorage')
render_link = res.json['data']['links']['render']
download_link = res.json['data']['links']['download']
assert mfr_url in render_link
assert download_link in render_link
assert 'revision=1' in render_link
guid = file.get_guid(create=True)._id
res = app.get(
'/{}files/{}/versions/1/'.format(API_BASE, file._id),
auth=user.auth,
)
render_link = res.json['data']['links']['render']
download_link = res.json['data']['links']['download']
assert mfr_url in render_link
assert download_link in render_link
assert guid in render_link
assert 'revision=1' in render_link
# test_read_only
assert app.put(
'/{}files/{}/versions/1/'.format(API_BASE, file._id),
expect_errors=True, auth=user.auth,
).status_code == 405
assert app.post(
'/{}files/{}/versions/1/'.format(API_BASE, file._id),
expect_errors=True, auth=user.auth,
).status_code == 405
assert app.delete(
'/{}files/{}/versions/1/'.format(API_BASE, file._id),
expect_errors=True, auth=user.auth,
).status_code == 405
@pytest.mark.django_db
class TestFileTagging:
@pytest.fixture()
def node(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def file_one(self, user, node):
return api_utils.create_test_file(
node, user, filename='file_one')
@pytest.fixture()
def payload(self, file_one):
payload = {
'data': {
'type': 'files',
'id': file_one._id,
'attributes': {
'checkout': None,
'tags': ['goofy']
}
}
}
return payload
@pytest.fixture()
def url(self, file_one):
return '/{}files/{}/'.format(API_BASE, file_one._id)
def test_tags_add_and_update_properly(self, app, user, url, payload):
# test_tags_add_properly
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PUT response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'goofy'
# test_tags_update_properly
# Ensure removing and adding tag data is correct from the PUT response
payload['data']['attributes']['tags'] = ['goofier']
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'goofier'
def test_tags_add_and_remove_properly(self, app, user, url, payload):
app.put_json_api(url, payload, auth=user.auth)
payload['data']['attributes']['tags'] = []
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_put_wo_tags_doesnt_remove_tags(self, app, user, url, payload):
app.put_json_api(url, payload, auth=user.auth)
payload['data']['attributes'] = {'checkout': None}
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PUT response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'goofy'
def test_add_and_remove_tag_adds_log(self, app, user, url, payload, node):
# test_add_tag_adds_log
count = node.logs.count()
app.put_json_api(url, payload, auth=user.auth)
assert node.logs.count() == count + 1
assert NodeLog.FILE_TAG_ADDED == node.logs.latest().action
# test_remove_tag_adds_log
payload['data']['attributes']['tags'] = []
count = node.logs.count()
app.put_json_api(url, payload, auth=user.auth)
assert node.logs.count() == count + 1
assert NodeLog.FILE_TAG_REMOVED == node.logs.latest().action
@pytest.mark.django_db
class TestPreprintFileView:
@pytest.fixture()
def preprint(self, user):
return PreprintFactory(creator=user)
@pytest.fixture()
def primary_file(self, preprint):
return preprint.primary_file
@pytest.fixture()
def file_url(self, primary_file):
return '/{}files/{}/'.format(API_BASE, primary_file._id)
@pytest.fixture()
def other_user(self):
return AuthUserFactory()
def test_published_preprint_file(self, app, file_url, preprint, user, other_user):
# Unauthenticated
res = app.get(file_url, expect_errors=True)
assert res.status_code == 200
# Non contrib
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 200
# Write contrib
preprint.add_contributor(other_user, WRITE, save=True)
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 200
# Admin contrib
res = app.get(file_url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_unpublished_preprint_file(self, app, file_url, preprint, user, other_user):
preprint.is_published = False
preprint.save()
# Unauthenticated
res = app.get(file_url, expect_errors=True)
assert res.status_code == 401
# Non contrib
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 403
# Write contrib
preprint.add_contributor(other_user, WRITE, save=True)
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 200
# Admin contrib
res = app.get(file_url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_private_preprint_file(self, app, file_url, preprint, user, other_user):
preprint.is_public = False
preprint.save()
# Unauthenticated
res = app.get(file_url, expect_errors=True)
assert res.status_code == 401
# Non contrib
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 403
# Write contrib
preprint.add_contributor(other_user, WRITE, save=True)
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 200
# Admin contrib
res = app.get(file_url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_deleted_preprint_file(self, app, file_url, preprint, user, other_user):
preprint.deleted = timezone.now()
preprint.save()
# Unauthenticated
res = app.get(file_url, expect_errors=True)
assert res.status_code == 410
# Non contrib
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 410
# Write contrib
preprint.add_contributor(other_user, WRITE, save=True)
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 410
# Admin contrib
res = app.get(file_url, auth=user.auth, expect_errors=True)
assert res.status_code == 410
def test_abandoned_preprint_file(self, app, file_url, preprint, user, other_user):
preprint.machine_state = DefaultStates.INITIAL.value
preprint.save()
# Unauthenticated
res = app.get(file_url, expect_errors=True)
assert res.status_code == 401
# Non contrib
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 403
# Write contrib
preprint.add_contributor(other_user, WRITE, save=True)
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 403
# Admin contrib
res = app.get(file_url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_withdrawn_preprint_files(self, app, file_url, preprint, user, other_user):
preprint.date_withdrawn = timezone.now()
preprint.save()
# Unauthenticated
res = app.get(file_url, expect_errors=True)
assert res.status_code == 401
# Noncontrib
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 403
# Write contributor
preprint.add_contributor(other_user, WRITE, save=True)
res = app.get(file_url, auth=other_user.auth, expect_errors=True)
assert res.status_code == 403
# Admin contrib
res = app.get(file_url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
|
avanov/django | refs/heads/master | tests/aggregation/tests.py | 14 | from __future__ import unicode_literals
import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Aggregate, Avg, Count, DecimalField, DurationField, FloatField, Func,
IntegerField, Max, Min, Sum, Value,
)
from django.test import TestCase, ignore_warnings
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
        books = Book.objects.filter(pk=self.b1.pk).annotate(
            mean_age=Avg('authors__age')
        ).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
        books = Book.objects.values("rating").annotate(
            n_authors=Count("authors__id"), mean_age=Avg("authors__age")
        ).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
def test_sum_distinct_aggregate(self):
"""
        Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[5, 6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum('age'))
self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
        publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(
            num_books__gt=1, book__price__lt=Decimal("40.0")
        ).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
        publishers = Publisher.objects.filter(
            book__price__lt=Decimal("40.0")
        ).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
        vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(
            authors__name__contains="Norvig", num_authors__gt=1
        ).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with six.assertRaisesRegex(self, TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with six.assertRaisesRegex(self, FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, float)
self.assertEqual(v, Approximate(47.39, places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
with six.assertRaisesRegex(self, FieldError, 'Expression contains mixed types. You must set output_field'):
Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk)
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with six.assertRaisesRegex(self, TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with six.assertRaisesRegex(self, FieldError, "Cannot compute Sum\('id__max'\): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = dict(function=self.function.lower(), expressions=sql)
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra['function'] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = dict(function='MAX', expressions='2')
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
def as_sqlite(self, compiler, connection):
return super(Greatest, self).as_sql(compiler, connection, function='MAX')
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs2, [1, 3], lambda v: v.num_awards)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_backwards_compatibility(self):
from django.db.models.sql import aggregates as sql_aggregates
class SqlNewSum(sql_aggregates.Aggregate):
sql_function = 'SUM'
class NewSum(Aggregate):
name = 'Sum'
def add_to_query(self, query, alias, col, source, is_summary):
klass = SqlNewSum
aggregate = klass(
col, source=source, is_summary=is_summary, **self.extra)
query.annotations[alias] = aggregate
qs = Author.objects.values('name').annotate(another_age=NewSum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
|
frankxu2004/WeixinCrawler | refs/heads/master | crawler/bs4/builder/_htmlparser.py | 412 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
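        # Example (illustrative): the hex reference '&#x41;' reaches this
        # handler as name 'x41' and decodes to 'A'; the decimal form
        # '&#65;' arrives as '65'.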
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
payamsm/django-allauth | refs/heads/master | allauth/socialaccount/providers/tumblr/__init__.py | 85 | __author__ = 'jshedd'
|
amvtek/Djam | refs/heads/master | djam/utils.py | 1 | # -*- coding: utf-8 -*-
"""
djam.utils
~~~~~~~~~~
:email: devel@amvtek.com
"""
from __future__ import unicode_literals, division
import os, re, hashlib, threading, string
from binascii import hexlify
from django.conf import settings
from django.utils import six
class SharedStateBase(object):
"Allow all instances to 'reliably' share variables"
# Use the 'Borg pattern' to share state between all instances.
# See source code of django.db.models.loading for example...
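    # Example (illustrative): every instance maps its __dict__ onto the
    # same shared dict, so after
    #   a, b = SharedStateBase(), SharedStateBase()
    #   a._local.counter = 1
    # b._local.counter is also 1 (within the same thread).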
__shared_state = dict(
# instances may use this to share variables on a per thread basis
_local=threading.local(),
)
def __init__(self):
"I shall always be called"
self.__dict__ = self.__shared_state
class SettingRename(object):
def __init__(self, settingFmt):
self.settingFmt = settingFmt
def __call__(self, name):
"""
Check if a setting has been defined to overwrite name
returns setting if it exist or name...
"""
setting4name = self.settingFmt(name.upper())
overwrite = getattr(settings, setting4name, None)
return overwrite or name
def calculate_password_verifier(password, seed=None, hmethod='sha1'):
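    # Output format sketch (seed value shown is hypothetical):
    #   calculate_password_verifier('s3cret', seed='ab12', hmethod='sha1')
    #   -> 'sha1$ab12$' + sha1('ab12' + 's3cret').hexdigest()  # 40 hex chars
    # Method, seed and digest are '$'-joined so the verifier is
    # self-describing and can be recomputed for a candidate password.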
# make sure password is not empty
password = password.strip()
if not password:
raise ValueError("can not use empty password")
h = getattr(hashlib, hmethod)()
if seed is None:
seed = hexlify(os.urandom(h.digest_size))
h.update(seed)
h.update(password)
return "$".join([hmethod, seed, h.hexdigest()])
class FolderLoader(object):
"Provide a callable that load file content from a Folder"
def __init__(self, refFileName):
self.baseFolder = os.path.dirname(os.path.abspath(refFileName))
if not os.path.exists(self.baseFolder):
raise ValueError(
"Unable to resolve containing folder for %s" % refFileName
)
    def load(self, filename):
        # Use a context manager so the file handle is always closed.
        with open(os.path.join(self.baseFolder, filename), 'r') as f:
            return f.read()
def get_cbv_object(viewfunc):
"""
If viewfunc has been obtained using CBV.as_view(**initkwargs) factory
returns instance of CBV that viewfunc will construct each time it is called
"""
if getattr(viewfunc, '__closure__', None) is None:
# viewfunc has not been constructed using CBV
return
    try:
        # We assume that viewfunc was returned by CBV.as_view(**initkwargs)
        # and try to retrieve the CBV class & initkwargs.
        #
        # This approach is **fragile** as it relies on inner variable
        # names used in the base as_view implementation.
        ctx = dict(zip(viewfunc.__code__.co_freevars,
                       [c.cell_contents for c in (viewfunc.__closure__ or [])]
                       ))
        initkwargs = ctx.get('initkwargs') or {}
        CBV = ctx.get('cls')
        if callable(CBV):
            return CBV(**initkwargs)
    except Exception:
        return None
_STEPS = range(4, 0, -1) # cache possible formatting steps
_SEPARATORS = string.whitespace + "_-"
def r2h(rawId, sep=" "):
"""
return readability enhanced identifier
by inserting a separator every n characters
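    Example: r2h("123456789012") -> "1234 5678 9012"; identifiers whose
    length is not divisible by 4, 3 or 2 are returned unchanged.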
"""
rId = str(rawId).strip()
lId = len(rId)
for s in _STEPS:
if lId % s == 0:
break
if s == 1:
return rId
buf = six.StringIO(rId)
parts = [buf.read(s) for i in range(lId // s)]
return sep.join(parts)
if six.PY2:
from django.utils.encoding import force_bytes, force_text
translate = string.translate
def h2r(humId):
"""
remove formatting separators from readability enhanced identifier
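        Example: h2r("1234 5678-9012") -> "123456789012"; whitespace,
        '_' and '-' all count as formatting separators.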
"""
return force_text(translate(force_bytes(humId), None, _SEPARATORS))
else:
    # TODO: work on Python 3 support...
def h2r(humId):
raise NotImplementedError("no implementation for Python 3")
|
julienr/vispy | refs/heads/master | vispy/geometry/parametric.py | 21 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .normals import normals
def surface(func, umin=0, umax=2 * np.pi, ucount=64, urepeat=1.0,
vmin=0, vmax=2 * np.pi, vcount=64, vrepeat=1.0):
"""
    Computes the parameterization of a parametric surface

    func: function(u, v)
        Parametric function used to build the surface
    umin, umax, ucount, urepeat:
        Parameter range, number of samples and texture-coordinate repeat
        factor along the u axis
    vmin, vmax, vcount, vrepeat:
        Parameter range, number of samples and texture-coordinate repeat
        factor along the v axis
"""
vtype = [('position', np.float32, 3),
('texcoord', np.float32, 2),
('normal', np.float32, 3)]
itype = np.uint32
# umin, umax, ucount = 0, 2*np.pi, 64
# vmin, vmax, vcount = 0, 2*np.pi, 64
vcount += 1
ucount += 1
n = vcount * ucount
Un = np.repeat(np.linspace(0, 1, ucount, endpoint=True), vcount)
Vn = np.tile(np.linspace(0, 1, vcount, endpoint=True), ucount)
U = umin + Un * (umax - umin)
V = vmin + Vn * (vmax - vmin)
vertices = np.zeros(n, dtype=vtype)
for i, (u, v) in enumerate(zip(U, V)):
vertices["position"][i] = func(u, v)
vertices["texcoord"][:, 0] = Un * urepeat
vertices["texcoord"][:, 1] = Vn * vrepeat
indices = []
for i in range(ucount - 1):
for j in range(vcount - 1):
indices.append(i * (vcount) + j)
indices.append(i * (vcount) + j + 1)
indices.append(i * (vcount) + j + vcount + 1)
indices.append(i * (vcount) + j + vcount)
indices.append(i * (vcount) + j + vcount + 1)
indices.append(i * (vcount) + j)
indices = np.array(indices, dtype=itype)
    # Use integer division so reshape receives an int under Python 3's
    # true division (len(indices) is always a multiple of 3 here).
    vertices["normal"] = normals(vertices["position"],
                                 indices.reshape(len(indices) // 3, 3))
return vertices, indices
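# Usage sketch (illustrative; 'torus' is a hypothetical caller-supplied
# parameterization returning an (x, y, z) position for each (u, v)):
#
#     def torus(u, v, R=2.0, r=0.5):
#         return ((R + r * np.cos(v)) * np.cos(u),
#                 (R + r * np.cos(v)) * np.sin(u),
#                 r * np.sin(v))
#
#     vertices, indices = surface(torus)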
|
nacc/cobbler | refs/heads/master | tests/profile_test.py | 3 | """
profile_test.py defines a set of methods designed for testing Cobbler
profiles.
Copyright 2009, Red Hat, Inc and Others
Steve Salevan <ssalevan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import urllib2
from base import *
class ProfileTests(CobblerTest):
def test_new_working_profile_basic(self):
"""
Attempt to create a Cobbler profile.
"""
distro_name = self.create_distro()[1]
profile_name = self.create_profile(distro_name)[1]
self.assertTrue(self.api.find_profile({'name': profile_name}) != [])
def test_new_nonworking_profile(self):
"""
Attempts to create a profile lacking required information.
"""
did = self.api.new_profile(self.token)
self.api.modify_profile(did, "name", "anythinggoes", self.token)
self.assertRaises(xmlrpclib.Fault, self.api.save_profile, did, self.token)
def test_getks_no_such_profile(self):
url = "http://%s/cblr/svc/op/ks/profile/%s" % (cfg['cobbler_server'],
"doesnotexist")
try:
response = urllib2.urlopen(url)
self.fail()
except urllib2.HTTPError, e:
self.assertEquals(404, e.code)
|
andrewgailey/robogen | refs/heads/master | robogen/rgkit/gamestate.py | 2 | from __future__ import division
import random
from collections import defaultdict
from rgkit import rg
from rgkit.settings import settings, AttrDict
class GameState(object):
def __init__(self, use_start=False, turn=0,
next_robot_id=0, seed=None, symmetric=True):
if seed is None:
seed = random.randint(0, settings.max_seed)
self._seed = str(seed)
self._spawn_random = random.Random(self._seed + 's')
self._attack_random = random.Random(self._seed + 'a')
self.robots = {}
self.turn = turn
self._next_robot_id = next_robot_id
if use_start and settings.start is not None:
for i, start in enumerate(settings.start):
for loc in start:
self.add_robot(loc, i)
self.symmetric = symmetric
if symmetric:
assert settings.player_count == 2
self._get_spawn_locations = self._get_spawn_locations_symmetric
else:
self._get_spawn_locations = self._get_spawn_locations_random
def add_robot(self, loc, player_id, hp=None, robot_id=None):
if hp is None:
hp = settings.robot_hp
if robot_id is None:
robot_id = self._next_robot_id
self._next_robot_id += 1
self.robots[loc] = AttrDict({
'location': loc,
'hp': hp,
'player_id': player_id,
'robot_id': robot_id
})
def remove_robot(self, loc):
if self.is_robot(loc):
del self.robots[loc]
def is_robot(self, loc):
return loc in self.robots
def _get_spawn_locations_symmetric(self):
def symmetric_loc(loc):
return (settings.board_size - 1 - loc[0],
settings.board_size - 1 - loc[1])
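        # Illustration (hypothetical 19x19 board): choosing loc == (3, 5)
        # for player 0 pairs it with symmetric_loc(loc) == (15, 13) for
        # player 1, keeping both spawn sets point-symmetric about the
        # board center.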
locs1 = []
locs2 = []
while len(locs1) < settings.spawn_per_player:
loc = self._spawn_random.choice(settings.spawn_coords)
sloc = symmetric_loc(loc)
if loc not in locs1 and loc not in locs2:
if sloc not in locs1 and sloc not in locs2:
locs1.append(loc)
locs2.append(sloc)
return locs1 + locs2
def _get_spawn_locations_random(self):
# see http://stackoverflow.com/questions/2612648/reservoir-sampling
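        # Sketch of the idea: the first `count` coordinates fill the
        # reservoir directly; each later coordinate then replaces a random
        # slot with probability count / n, so every spawn coordinate ends
        # up equally likely to be chosen.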
locations = []
per_player = settings.spawn_per_player
count = per_player * settings.player_count
n = 0
for loc in settings.spawn_coords:
n += 1
if len(locations) < count:
locations.append(loc)
else:
s = int(self._spawn_random.random() * n)
if s < count:
locations[s] = loc
self._spawn_random.shuffle(locations)
return locations
def _get_contenders(self, dest):
"""
        Generates a dict of locations, where the values correspond to the
        set of bots that wish to move into that square or will be moving
        into that square. Due to collisions, a bot can 'use up' two
        squares:
        1. the first is blocked because he attempted to move into it
        2. the second is blocked because that is his current location,
           where he will be staying due to the collision at 1
:param dest: func(location of robot) = destination of robot
:returns: dict[destination] = set(locations of bots that either
want to move to 'destination' or
are moving to 'destination'
because of collisions)
"""
contenders = defaultdict(lambda: set())
def stuck(loc):
# Robot at loc is stuck
# Other robots trying to move in its old locations
# should be marked as stuck, too
old_contenders = contenders[loc]
contenders[loc] = set([loc])
for contender in old_contenders:
if contender != loc:
stuck(contender)
for loc in self.robots:
contenders[dest(loc)].add(loc)
for loc in self.robots:
if len(contenders[dest(loc)]) > 1 or (self.is_robot(dest(loc)) and
dest(loc) != loc and
dest(dest(loc)) == loc):
# Robot at loc is going to fail to move
stuck(loc)
return contenders
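    # Worked example: robots at A and B both pick destination C, so
    # contenders[C] == {A, B} and both are marked stuck; stuck() then
    # records contenders[A] == {A} and contenders[B] == {B}, which in
    # turn marks as stuck any robot that was moving into A or B.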
# new_locations = {loc: new_loc}
def _get_new_locations(self, dest, contenders):
new_locations = {}
for loc in self.robots:
if loc != dest(loc) and loc in contenders[loc]:
new_locations[loc] = loc
else:
new_locations[loc] = dest(loc)
return new_locations
# collisions = {loc: set(robots collided with robot at loc)}
def _get_collisions(self, dest, contenders):
collisions = defaultdict(lambda: set())
for loc in self.robots:
for loc2 in contenders[dest(loc)]:
collisions[loc].add(loc2)
collisions[loc2].add(loc)
return collisions
    # damage_map = {loc: list indexed by player_id of {actor_loc: damage}}
# only counts potential attack and suicide damage
# self suicide damage is not counted
def _get_damage_map(self, actions):
damage_map = defaultdict(
lambda: [{} for _ in range(settings.player_count)])
for loc, robot in self.robots.items():
actor_id = robot.player_id
if actions[loc][0] == 'attack':
target = actions[loc][1]
damage = self._attack_random.randint(
*settings.attack_range)
damage_map[target][actor_id][loc] = damage
elif actions[loc][0] == 'suicide':
damage = settings.suicide_damage
for target in rg.locs_around(loc):
damage_map[target][actor_id][loc] = damage
return damage_map
@staticmethod
def _apply_damage_caused(delta, damage_caused):
for robot_delta in delta:
robot_delta.damage_caused += damage_caused[robot_delta.loc]
@staticmethod
def _apply_spawn(delta, spawn_locations):
# clear robots on spawn
for robot_delta in delta:
if robot_delta.loc_end in settings.spawn_coords:
robot_delta.hp_end = 0
# spawn robots
for i in range(settings.spawn_per_player):
for player_id in range(settings.player_count):
loc = spawn_locations[player_id*settings.spawn_per_player+i]
delta.append(AttrDict({
'loc': loc,
'hp': 0,
'player_id': player_id,
'loc_end': loc,
'hp_end': settings.robot_hp,
'damage_caused': 0
}))
# actions = {loc: action}
# all actions must be valid
# delta = [AttrDict{
# 'loc': loc,
# 'hp': hp,
# 'player_id': player_id,
# 'loc_end': loc_end,
# 'hp_end': hp_end
# 'damage_caused': damage_caused
# }]
def get_delta(self, actions, spawn=True):
delta = []
def dest(loc):
if actions[loc][0] == 'move':
return actions[loc][1]
else:
return loc
contenders = self._get_contenders(dest)
new_locations = self._get_new_locations(dest, contenders)
collisions = self._get_collisions(dest, contenders)
damage_map = self._get_damage_map(actions)
damage_caused = defaultdict(lambda: 0) # {loc: damage_caused}
for loc, robot in self.robots.items():
robot_delta = AttrDict({
'loc': loc,
'hp': robot.hp,
'player_id': robot.player_id,
'loc_end': new_locations[loc],
'hp_end': robot.hp, # to be adjusted
'damage_caused': 0 # to be adjusted
})
is_guard = actions[loc][0] == 'guard'
# collision damage
if not is_guard:
damage = settings.collision_damage
for other_loc in collisions[loc]:
if robot.player_id != self.robots[other_loc].player_id:
robot_delta.hp_end -= damage
damage_caused[other_loc] += damage
# attack and suicide damage
for player_id, player_damage_map in enumerate(
damage_map[new_locations[loc]]):
if player_id != robot.player_id:
for actor_loc, damage in player_damage_map.items():
if is_guard:
damage //= 2
robot_delta.hp_end -= damage
damage_caused[actor_loc] += damage
# account for suicides
if actions[loc][0] == 'suicide':
robot_delta.hp_end = 0
delta.append(robot_delta)
self._apply_damage_caused(delta, damage_caused)
if spawn and self.turn % settings.spawn_every == 0:
self._apply_spawn(delta, self._get_spawn_locations())
return delta
# delta = [AttrDict{
# 'loc': loc,
# 'hp': hp,
# 'player_id': player_id,
# 'loc_end': loc_end,
# 'hp_end': hp_end,
# 'damage_caused': damage_caused
# }]
# returns new GameState
def apply_delta(self, delta):
        new_state = GameState(next_robot_id=self._next_robot_id,
                              turn=self.turn + 1,
                              seed=self._spawn_random.randint(
                                  0, settings.max_seed),
                              symmetric=self.symmetric)
for delta_info in delta:
if delta_info.hp_end > 0:
loc = delta_info.loc
# is this a new robot?
if delta_info.hp > 0:
robot_id = self.robots[loc].robot_id
else:
robot_id = None
new_state.add_robot(delta_info.loc_end, delta_info.player_id,
delta_info.hp_end, robot_id)
return new_state
# actions = {loc: action}
# all actions must be valid
# returns new GameState
def apply_actions(self, actions, spawn=True):
delta = self.get_delta(actions, spawn)
return self.apply_delta(delta)
def get_scores(self):
scores = [0 for _ in range(settings.player_count)]
for robot in self.robots.values():
scores[robot.player_id] += 1
return scores
# export GameState to be used by a robot
def get_game_info(self, player_id):
game_info = AttrDict()
game_info.robots = dict((loc, AttrDict(robot))
for loc, robot in self.robots.items())
for robot in game_info.robots.values():
if robot.player_id != player_id:
del robot.robot_id
game_info.turn = self.turn
return game_info
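# Minimal usage sketch (illustrative only; assumes rgkit's bundled default
# settings, so the locations below must be valid board coordinates):
if __name__ == '__main__':
    state = GameState(seed=42)
    state.add_robot((9, 9), player_id=0)
    state.add_robot((9, 10), player_id=1)
    actions = {(9, 9): ('attack', (9, 10)), (9, 10): ('guard',)}
    new_state = state.apply_actions(actions, spawn=False)
    print(new_state.get_scores())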
|
0x0all/SASM | refs/heads/master | Windows/MinGW64/opt/lib/python2.7/encodings/gb18030.py | 816 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb18030',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
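# Illustrative round trip (the `encodings` package registers this codec
# automatically, so ordinary str/unicode methods can use it by name):
#   u'\u4e2d\u6587'.encode('gb18030').decode('gb18030') == u'\u4e2d\u6587'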
|
googleapis/googleapis-gen | refs/heads/master | google/cloud/documentai/v1beta2/documentai-v1beta2-py/google/cloud/documentai/__init__.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.documentai_v1beta2.services.document_understanding_service.client import DocumentUnderstandingServiceClient
from google.cloud.documentai_v1beta2.services.document_understanding_service.async_client import DocumentUnderstandingServiceAsyncClient
from google.cloud.documentai_v1beta2.types.document import Document
from google.cloud.documentai_v1beta2.types.document_understanding import AutoMlParams
from google.cloud.documentai_v1beta2.types.document_understanding import BatchProcessDocumentsRequest
from google.cloud.documentai_v1beta2.types.document_understanding import BatchProcessDocumentsResponse
from google.cloud.documentai_v1beta2.types.document_understanding import EntityExtractionParams
from google.cloud.documentai_v1beta2.types.document_understanding import FormExtractionParams
from google.cloud.documentai_v1beta2.types.document_understanding import GcsDestination
from google.cloud.documentai_v1beta2.types.document_understanding import GcsSource
from google.cloud.documentai_v1beta2.types.document_understanding import InputConfig
from google.cloud.documentai_v1beta2.types.document_understanding import KeyValuePairHint
from google.cloud.documentai_v1beta2.types.document_understanding import OcrParams
from google.cloud.documentai_v1beta2.types.document_understanding import OperationMetadata
from google.cloud.documentai_v1beta2.types.document_understanding import OutputConfig
from google.cloud.documentai_v1beta2.types.document_understanding import ProcessDocumentRequest
from google.cloud.documentai_v1beta2.types.document_understanding import ProcessDocumentResponse
from google.cloud.documentai_v1beta2.types.document_understanding import TableBoundHint
from google.cloud.documentai_v1beta2.types.document_understanding import TableExtractionParams
from google.cloud.documentai_v1beta2.types.geometry import BoundingPoly
from google.cloud.documentai_v1beta2.types.geometry import NormalizedVertex
from google.cloud.documentai_v1beta2.types.geometry import Vertex
__all__ = ('DocumentUnderstandingServiceClient',
'DocumentUnderstandingServiceAsyncClient',
'Document',
'AutoMlParams',
'BatchProcessDocumentsRequest',
'BatchProcessDocumentsResponse',
'EntityExtractionParams',
'FormExtractionParams',
'GcsDestination',
'GcsSource',
'InputConfig',
'KeyValuePairHint',
'OcrParams',
'OperationMetadata',
'OutputConfig',
'ProcessDocumentRequest',
'ProcessDocumentResponse',
'TableBoundHint',
'TableExtractionParams',
'BoundingPoly',
'NormalizedVertex',
'Vertex',
)
|
xuweiliang/Codelibrary | refs/heads/master | nova/network/rpcapi.py | 4 | # Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the network RPC API.
"""
import oslo_messaging as messaging
from oslo_serialization import jsonutils
import nova.conf
from nova import exception
from nova.objects import base as objects_base
from nova import rpc
CONF = nova.conf.CONF
class NetworkAPI(object):
'''Client side of the network rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds migrate_instance_[start|finish]
* 1.2 - Make migrate_instance_[start|finish] a little more flexible
* 1.3 - Adds fanout cast update_dns for multi_host networks
* 1.4 - Add get_backdoor_port()
* 1.5 - Adds associate
* 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
* 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
* 1.8 - Adds macs to allocate_for_instance
* 1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes
instance_uuid from allocate_for_instance and
instance_get_nw_info
... Grizzly supports message version 1.9. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.9.
    * 1.10 - Adds (optional) requested_networks to deallocate_for_instance
... Havana supports message version 1.10. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.10.
* NOTE: remove unused method get_vifs_by_instance()
* NOTE: remove unused method get_vif_by_mac_address()
* NOTE: remove unused method get_network()
* NOTE: remove unused method get_all_networks()
* 1.11 - Add instance to deallocate_for_instance().
Remove instance_id, project_id, and host.
* 1.12 - Add instance to deallocate_fixed_ip()
... Icehouse supports message version 1.12. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.12.
* 1.13 - Convert allocate_for_instance()
to use NetworkRequestList objects
... Juno and Kilo supports message version 1.13. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.13.
* NOTE: remove unused method get_floating_ips_by_fixed_address()
* NOTE: remove unused method get_instance_uuids_by_ip_filter()
* NOTE: remove unused method disassociate_network()
* NOTE: remove unused method get_fixed_ip()
* NOTE: remove unused method get_fixed_ip_by_address()
* NOTE: remove unused method get_floating_ip()
* NOTE: remove unused method get_floating_ip_pools()
* NOTE: remove unused method get_floating_ip_by_address()
* NOTE: remove unused method get_floating_ips_by_project()
* NOTE: remove unused method get_instance_id_by_floating_address()
* NOTE: remove unused method allocate_floating_ip()
* NOTE: remove unused method deallocate_floating_ip()
* NOTE: remove unused method associate_floating_ip()
* NOTE: remove unused method disassociate_floating_ip()
* NOTE: remove unused method associate()
* 1.14 - Add mac parameter to release_fixed_ip().
* 1.15 - Convert set_network_host() to use Network objects.
... Liberty supports message version 1.15. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.15.
* 1.16 - Transfer instance in addition to instance_id in
setup_networks_on_host
... Liberty supports message version 1.16. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.16.
* 1.17 - Add method release_dhcp()
'''
VERSION_ALIASES = {
'grizzly': '1.9',
'havana': '1.10',
'icehouse': '1.12',
'juno': '1.13',
'kilo': '1.13',
'liberty': '1.15',
'mitaka': '1.16',
}
def __init__(self, topic=None):
super(NetworkAPI, self).__init__()
topic = topic or CONF.network_topic
target = messaging.Target(topic=topic, version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.network,
CONF.upgrade_levels.network)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, version_cap, serializer)
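    # Illustrative negotiation flow: with CONF.upgrade_levels.network set
    # to 'icehouse', version_cap resolves to '1.12' via VERSION_ALIASES,
    # so can_send_version('1.13') is False and methods such as
    # allocate_for_instance() below fall back to their 1.9 signatures.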
# TODO(russellb): Convert this to named arguments. It's a pretty large
# list, so unwinding it all is probably best done in its own patch so it's
# easier to review.
def create_networks(self, ctxt, **kwargs):
return self.client.call(ctxt, 'create_networks', **kwargs)
def delete_network(self, ctxt, uuid, fixed_range):
return self.client.call(ctxt, 'delete_network',
uuid=uuid, fixed_range=fixed_range)
def allocate_for_instance(self, ctxt, instance_id, project_id, host,
rxtx_factor, vpn, requested_networks, macs=None,
dhcp_options=None):
version = '1.13'
if not self.client.can_send_version(version):
version = '1.9'
if requested_networks:
requested_networks = requested_networks.as_tuples()
if CONF.multi_host:
cctxt = self.client.prepare(version=version, server=host)
else:
cctxt = self.client.prepare(version=version)
return cctxt.call(ctxt, 'allocate_for_instance',
instance_id=instance_id, project_id=project_id,
host=host, rxtx_factor=rxtx_factor, vpn=vpn,
requested_networks=requested_networks,
macs=jsonutils.to_primitive(macs))
def deallocate_for_instance(self, ctxt, instance, requested_networks=None):
cctxt = self.client
kwargs = {}
if self.client.can_send_version('1.11'):
version = '1.11'
kwargs['instance'] = instance
kwargs['requested_networks'] = requested_networks
else:
if self.client.can_send_version('1.10'):
version = '1.10'
kwargs['requested_networks'] = requested_networks
else:
version = '1.0'
kwargs['host'] = instance.host
kwargs['instance_id'] = instance.uuid
kwargs['project_id'] = instance.project_id
if CONF.multi_host:
cctxt = cctxt.prepare(server=instance.host, version=version)
return cctxt.call(ctxt, 'deallocate_for_instance', **kwargs)
def release_dhcp(self, ctxt, host, dev, address, vif_address):
if self.client.can_send_version('1.17'):
cctxt = self.client.prepare(version='1.17', server=host)
return cctxt.call(ctxt, 'release_dhcp', dev=dev, address=address,
vif_address=vif_address)
else:
raise exception.RPCPinnedToOldVersion()
def add_fixed_ip_to_instance(self, ctxt, instance_id, rxtx_factor,
host, network_id):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'add_fixed_ip_to_instance',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, network_id=network_id)
def remove_fixed_ip_from_instance(self, ctxt, instance_id, rxtx_factor,
host, address):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'remove_fixed_ip_from_instance',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, address=address)
def add_network_to_project(self, ctxt, project_id, network_uuid):
return self.client.call(ctxt, 'add_network_to_project',
project_id=project_id,
network_uuid=network_uuid)
def get_instance_nw_info(self, ctxt, instance_id, rxtx_factor, host,
project_id):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'get_instance_nw_info',
instance_id=instance_id, rxtx_factor=rxtx_factor,
host=host, project_id=project_id)
def validate_networks(self, ctxt, networks):
return self.client.call(ctxt, 'validate_networks', networks=networks)
def get_dns_domains(self, ctxt):
return self.client.call(ctxt, 'get_dns_domains')
def add_dns_entry(self, ctxt, address, name, dns_type, domain):
return self.client.call(ctxt, 'add_dns_entry',
address=address, name=name,
dns_type=dns_type, domain=domain)
def modify_dns_entry(self, ctxt, address, name, domain):
return self.client.call(ctxt, 'modify_dns_entry',
address=address, name=name, domain=domain)
def delete_dns_entry(self, ctxt, name, domain):
return self.client.call(ctxt, 'delete_dns_entry',
name=name, domain=domain)
def delete_dns_domain(self, ctxt, domain):
return self.client.call(ctxt, 'delete_dns_domain', domain=domain)
def get_dns_entries_by_address(self, ctxt, address, domain):
return self.client.call(ctxt, 'get_dns_entries_by_address',
address=address, domain=domain)
def get_dns_entries_by_name(self, ctxt, name, domain):
return self.client.call(ctxt, 'get_dns_entries_by_name',
name=name, domain=domain)
def create_private_dns_domain(self, ctxt, domain, av_zone):
return self.client.call(ctxt, 'create_private_dns_domain',
domain=domain, av_zone=av_zone)
def create_public_dns_domain(self, ctxt, domain, project):
return self.client.call(ctxt, 'create_public_dns_domain',
domain=domain, project=project)
def setup_networks_on_host(self, ctxt, instance_id, host, teardown,
instance):
# NOTE(tr3buchet): the call is just to wait for completion
version = '1.16'
kwargs = {}
if not self.client.can_send_version(version):
version = '1.0'
else:
kwargs['instance'] = instance
cctxt = self.client.prepare(version=version)
return cctxt.call(ctxt, 'setup_networks_on_host',
instance_id=instance_id, host=host,
teardown=teardown, **kwargs)
def set_network_host(self, ctxt, network_ref):
version = '1.15'
if not self.client.can_send_version(version):
version = '1.0'
network_ref = objects_base.obj_to_primitive(network_ref)
cctxt = self.client.prepare(version=version)
return cctxt.call(ctxt, 'set_network_host', network_ref=network_ref)
def rpc_setup_network_on_host(self, ctxt, network_id, teardown, host):
# NOTE(tr3buchet): the call is just to wait for completion
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, 'rpc_setup_network_on_host',
network_id=network_id, teardown=teardown)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _rpc_allocate_fixed_ip(self, ctxt, instance_id, network_id, address,
vpn, host):
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, '_rpc_allocate_fixed_ip',
instance_id=instance_id, network_id=network_id,
address=address, vpn=vpn)
def deallocate_fixed_ip(self, ctxt, address, host, instance):
kwargs = {}
if self.client.can_send_version('1.12'):
version = '1.12'
kwargs['instance'] = instance
else:
version = '1.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'deallocate_fixed_ip',
address=address, host=host, **kwargs)
def update_dns(self, ctxt, network_ids):
cctxt = self.client.prepare(fanout=True, version='1.3')
cctxt.cast(ctxt, 'update_dns', network_ids=network_ids)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _associate_floating_ip(self, ctxt, floating_address, fixed_address,
interface, host, instance_uuid=None):
cctxt = self.client.prepare(server=host, version='1.6')
return cctxt.call(ctxt, '_associate_floating_ip',
floating_address=floating_address,
fixed_address=fixed_address,
interface=interface, instance_uuid=instance_uuid)
# NOTE(russellb): Ideally this would not have a prefix of '_' since it is
# a part of the rpc API. However, this is how it was being called when the
# 1.0 API was being documented using this client proxy class. It should be
# changed if there was ever a 2.0.
def _disassociate_floating_ip(self, ctxt, address, interface, host,
instance_uuid=None):
cctxt = self.client.prepare(server=host, version='1.6')
return cctxt.call(ctxt, '_disassociate_floating_ip',
address=address, interface=interface,
instance_uuid=instance_uuid)
def lease_fixed_ip(self, ctxt, address, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'lease_fixed_ip', address=address)
def release_fixed_ip(self, ctxt, address, host, mac):
kwargs = {}
if self.client.can_send_version('1.14'):
version = '1.14'
kwargs['mac'] = mac
else:
version = '1.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'release_fixed_ip', address=address, **kwargs)
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
cctxt = self.client.prepare(server=host, version='1.2')
return cctxt.call(ctxt, 'migrate_instance_start',
instance_uuid=instance_uuid,
rxtx_factor=rxtx_factor,
project_id=project_id,
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses)
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
cctxt = self.client.prepare(server=host, version='1.2')
return cctxt.call(ctxt, 'migrate_instance_finish',
instance_uuid=instance_uuid,
rxtx_factor=rxtx_factor,
project_id=project_id,
source=source_compute,
dest=dest_compute,
floating_addresses=floating_addresses)
|
samthor/intellij-community | refs/heads/master | python/testData/quickFixes/RenameElementQuickFixTest/pep8Function_after.py | 62 | def a(): pass |
ausarbluhd/EternalLLC | refs/heads/master | scripts/mallory/src/config_if.py | 1 | """
This module interfaces with the getifaddrs libc function to return a list
of ethernet devices in a reliable fashion. This should be portable on
any POSIX compliant system. Only tested on Linux.
(grepping ifconfig is nasty)
"""
from ctypes import *
import subprocess
# Structures as defined by:
# http://www.kernel.org/doc/man-pages/online/pages/man3/getifaddrs.3.html
class ifa_ifu(Union):
_fields_ = [("ifu_broadaddr", c_void_p),
("ifu_dstaddr", c_void_p)
]
class ifaddrs(Structure):
_fields_ = [("ifa_next", c_void_p),
("ifa_name", c_char_p),
("ifa_flags", c_int),
("ifa_addr", c_void_p),
("ifa_netmask", c_void_p),
("ifa_ifu", ifa_ifu),
("ifa_data", c_void_p)
]
class ConfigInterfaces(object):
def __init__(self):
"""
This class is the model for the configured interfaces. It will hold
and store which interfaces have been selected for MiTM and which
interfaces has been selected to be the outbound interface. Currently
the model is a simple one.
MiTM interfaces can't be outbound. You get one outbound interface
at a time.
"""
self.interfaces = []
self.mitm_interfaces = []
self.outbound_interfaces = []
self.banned_interfaces = ['lo']
def set_interfaces(self, interfaces):
self.interfaces = interfaces
for interface in self.interfaces:
if interface in self.banned_interfaces:
self.interfaces.remove(interface)
def get_idx_for_if(self, interface):
return self.interfaces.index(interface)
def get_if_for_idx(self, index):
if index >= 0 and index < self.num_ifs():
return self.interfaces[index]
return ""
def get_outbound(self):
if len(self.outbound_interfaces) == 1:
return self.outbound_interfaces[0]
return None
def get_mitm(self):
return self.mitm_interfaces
def is_mitm(self, interface):
if interface in self.mitm_interfaces:
return True
else:
return False
def set_mitm(self, interface, state):
if interface not in self.interfaces:
return False
self.set_outbound(interface, False)
if state == False:
if interface in self.mitm_interfaces:
self.mitm_interfaces.remove(interface)
if state == True:
if interface not in self.mitm_interfaces:
self.mitm_interfaces.append(interface)
return True
def is_outbound(self, interface):
if interface in self.outbound_interfaces:
return True
else:
return False
def set_outbound(self, interface, state):
if interface not in self.interfaces:
return False
# Remove interface from MiTM list if setting as outbound
if interface in self.mitm_interfaces and state == True:
self.mitm_interfaces.remove(interface)
# Add to outbound list
if state == True:
self.outbound_interfaces = [interface]
return True
if state == False and self.is_outbound(interface):
self.outbound_interfaces.remove(interface)
return False
def num_ifs(self):
return len(self.interfaces)
def reset(self):
self.interfaces = []
self.mitm_interfaces = []
self.outbound_interfaces = []
def save(self):
"""
        This method saves the configuration of the MiTM and Outbound interfaces.
Note: Dire security implications here as we are calling a bunch of
shell commands with *potentially* untrusted input. The only real input
are network interface device names. We figure, if an attacker can get
a malicious interface name onto your system to sneak into these shell
commands you were already in trouble. Probably owned by an APT.
"""
cmds = []
# Turn on ip_forwarding. Linux only
cmds.append("echo 1 > /proc/sys/net/ipv4/ip_forward")
# Delete all iptables rules and set it to
cmds.append("iptables -F")
cmds.append("iptables -X")
cmds.append("iptables -t nat -F")
cmds.append("iptables -t nat -X")
cmds.append("iptables -t mangle -F")
cmds.append("iptables -t mangle -X")
cmds.append("iptables -P INPUT ACCEPT")
cmds.append("iptables -P FORWARD ACCEPT")
cmds.append("iptables -P OUTPUT ACCEPT")
# Turn on NAT on the outbound interfaces
cmds.append(
("iptables -t nat -A POSTROUTING -o "
"%s -j MASQUERADE") % self.outbound_interfaces[0]
)
for interface in self.get_mitm():
cmds.append( ("iptables -t nat -A PREROUTING -j REDIRECT -i "
"%s -p tcp -m tcp --to-ports 20755") % interface)
cmds.append( ("iptables -t nat -A PREROUTING -j REDIRECT -i "
"%s -p udp -m udp --to-ports 20755") % interface)
for cmd in cmds:
subprocess.call(cmd, shell=True)
print cmds
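    # For outbound 'eth0' and MiTM 'eth1', save() runs commands along the
    # lines of (illustrative):
    #   iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
    #   iptables -t nat -A PREROUTING -j REDIRECT -i eth1 -p tcp -m tcp --to-ports 20755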
def __str__(self):
return ("ifs:%s, mitm_ifs:%s, outbound_ifs:%s"
% (self.interfaces, self.mitm_interfaces,
self.outbound_interfaces))
def test(self):
self.interfaces = ['eth1', 'eth2', 'ppp0']
testif = 'eth1'
self.set_mitm(testif, True)
if not self.is_mitm(testif):
print "Test Fail: MiTM Setting"
self.set_mitm(testif, False)
if self.is_mitm(testif):
print "Test Fail: MiTM Setting"
self.set_mitm(testif, True)
# Outbound interface test cases
print self
self.reset()
self.interfaces = ['eth1', 'eth2', 'ppp0']
self.set_mitm('eth2', True)
self.set_mitm('ppp0', True)
self.set_outbound('eth1', True)
print "OB Testing: '%s'" % self
self.set_outbound('eth2', True)
print "OB Testing 2: '%s'" % (self)
self.set_mitm('eth2', True)
print "OB Testing 3: '%s'" % (self)
self.set_mitm('eth1', False)
print "OB Testing 3: '%s'" % (self)
class NetworkInterfaces(object):
"""
    This class provides a POSIX-compliant method, using ctypes, to retrieve
    the network interfaces available to the OS.
"""
def __init__(self):
self.libc = cdll.LoadLibrary("libc.so.6")
self.getifaddrs = self.libc.getifaddrs
self.freeifaddrs = self.libc.freeifaddrs
#TODO Package this in a class
def get_ifs(self):
"""
Get a list of available interfaces
        @return: dict - interface names mapped to True
"""
ifa = self.getifaddrs_c()
ifa_orig = ifa
ifnames = {}
# Loop over linked list of devices
while True:
name = ifa.ifa_name
ifnames[name] = True
if ifa.ifa_next:
# ifa.ifa_next is just a pointer. Convert to ctypes from mem addr
ifa = ifaddrs.from_address(ifa.ifa_next)
else:
break
self.freeifaddrs(pointer(ifa_orig))
return ifnames
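    # Illustrative result on a typical Linux host (names will differ):
    #   >>> NetworkInterfaces().get_ifs()
    #   {'lo': True, 'eth0': True, 'wlan0': True}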
def getifaddrs_c(self):
"""
Interface with libc to call libc.getifaddrs.
@return: ctypes type instance (ifaddrs struct).
"""
ptr = c_void_p(None)
ifaddrs_struct_p = self.getifaddrs(pointer(ptr))
return ifaddrs.from_address(ptr.value)
if __name__ == "__main__":
ni = NetworkInterfaces()
print ni.get_ifs() |
pk400/catering | refs/heads/master | myvenv/lib/python3.4/site-packages/django/conf/locale/zh_Hans/formats.py | 1008 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
    '%Y年%n月%j日 %H:%M', # '2016年9月5日 20:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
    '%Y年%n月%j日 %H:%M:%S.%f', # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
|
ortylp/scipy | refs/heads/master | scipy/fftpack/basic.py | 56 | """
Discrete Fourier Transforms - basic.py
"""
# Created by Pearu Peterson, August,September 2002
from __future__ import division, print_function, absolute_import
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2']
from numpy import zeros, swapaxes
import numpy
from . import _fftpack
import atexit
atexit.register(_fftpack.destroy_zfft_cache)
atexit.register(_fftpack.destroy_zfftnd_cache)
atexit.register(_fftpack.destroy_drfft_cache)
atexit.register(_fftpack.destroy_cfft_cache)
atexit.register(_fftpack.destroy_cfftnd_cache)
atexit.register(_fftpack.destroy_rfft_cache)
del atexit
def istype(arr, typeclass):
return issubclass(arr.dtype.type, typeclass)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
# XXX: single precision FFTs partially disabled due to accuracy issues
# for large prime-sized inputs.
#
# See http://permalink.gmane.org/gmane.comp.python.scientific.devel/13834
# ("fftpack test failures for 0.8.0b1", Ralf Gommers, 17 Jun 2010,
# @ scipy-dev)
#
# These should be re-enabled once the problems are resolved
def _is_safe_size(n):
"""
Is the size of FFT such that FFTPACK can handle it in single precision
with sufficient accuracy?
    Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has
    efficient kernels for those factors.
"""
n = int(n)
if n == 0:
return True
# Divide by 3 until you can't, then by 5 until you can't
for c in (3, 5):
while n % c == 0:
n //= c
# Return True if the remainder is a power of 2
return not n & (n-1)
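# e.g. _is_safe_size(360) is True (360 == 2**3 * 3**2 * 5), while
# _is_safe_size(7) is False, since 7 is a prime other than 2, 3 or 5.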
def _fake_crfft(x, n, *a, **kw):
if _is_safe_size(n):
return _fftpack.crfft(x, n, *a, **kw)
else:
return _fftpack.zrfft(x, n, *a, **kw).astype(numpy.complex64)
def _fake_cfft(x, n, *a, **kw):
if _is_safe_size(n):
return _fftpack.cfft(x, n, *a, **kw)
else:
return _fftpack.zfft(x, n, *a, **kw).astype(numpy.complex64)
def _fake_rfft(x, n, *a, **kw):
if _is_safe_size(n):
return _fftpack.rfft(x, n, *a, **kw)
else:
return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32)
def _fake_cfftnd(x, shape, *a, **kw):
if numpy.all(list(map(_is_safe_size, shape))):
return _fftpack.cfftnd(x, shape, *a, **kw)
else:
return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64)
_DTYPE_TO_FFT = {
# numpy.dtype(numpy.float32): _fftpack.crfft,
numpy.dtype(numpy.float32): _fake_crfft,
numpy.dtype(numpy.float64): _fftpack.zrfft,
# numpy.dtype(numpy.complex64): _fftpack.cfft,
numpy.dtype(numpy.complex64): _fake_cfft,
numpy.dtype(numpy.complex128): _fftpack.zfft,
}
_DTYPE_TO_RFFT = {
# numpy.dtype(numpy.float32): _fftpack.rfft,
numpy.dtype(numpy.float32): _fake_rfft,
numpy.dtype(numpy.float64): _fftpack.drfft,
}
_DTYPE_TO_FFTN = {
# numpy.dtype(numpy.complex64): _fftpack.cfftnd,
numpy.dtype(numpy.complex64): _fake_cfftnd,
numpy.dtype(numpy.complex128): _fftpack.zfftnd,
# numpy.dtype(numpy.float32): _fftpack.cfftnd,
numpy.dtype(numpy.float32): _fake_cfftnd,
numpy.dtype(numpy.float64): _fftpack.zfftnd,
}
def _asfarray(x):
"""Like numpy asfarray, except that it does not modify x dtype if x is
    already an array with a float dtype, and does not cast complex types to
real."""
if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]:
return x
else:
# We cannot use asfarray directly because it converts sequences of
# complex to sequence of real
ret = numpy.asarray(x)
if ret.dtype.char not in numpy.typecodes["AllFloat"]:
return numpy.asfarray(x)
return ret
def _fix_shape(x, n, axis):
""" Internal auxiliary function for _raw_fft, _raw_fftnd."""
s = list(x.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
x = x[index]
return x, False
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s,x.dtype.char)
z[index] = x
return z, True
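# e.g. for x of shape (5,): _fix_shape(x, 3, 0) truncates to shape (3,) and
# returns (view, False); _fix_shape(x, 8, 0) zero-pads to shape (8,) and
# returns (copy, True).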
def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
""" Internal auxiliary function for fft, ifft, rfft, irfft."""
if n is None:
n = x.shape[axis]
elif n != x.shape[axis]:
x, copy_made = _fix_shape(x,n,axis)
overwrite_x = overwrite_x or copy_made
if n < 1:
raise ValueError("Invalid number of FFT data points "
"(%d) specified." % n)
if axis == -1 or axis == len(x.shape)-1:
r = work_function(x,n,direction,overwrite_x=overwrite_x)
else:
x = swapaxes(x, axis, -1)
r = work_function(x,n,direction,overwrite_x=overwrite_x)
r = swapaxes(r, axis, -1)
return r
def fft(x, n=None, axis=-1, overwrite_x=False):
"""
Return discrete Fourier transform of real or complex sequence.
The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.
Parameters
----------
x : array_like
Array to Fourier transform.
n : int, optional
Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is
truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
default results in ``n = x.shape[axis]``.
axis : int, optional
Axis along which the fft's are computed; the default is over the
last axis (i.e., ``axis=-1``).
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
z : complex ndarray
with the elements::
[y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)] if n is even
[y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)] if n is odd
where::
y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
Note that ``y(-j) = y(n-j).conjugate()``.
See Also
--------
ifft : Inverse FFT
rfft : FFT of a real sequence
Notes
-----
The packing of the result is "standard": If ``A = fft(a, n)``, then
``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the
positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency
terms, in order of decreasingly negative frequency. So for an 8-point
transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1].
To rearrange the fft output so that the zero-frequency component is
centered, like [-4, -3, -2, -1, 0, 1, 2, 3], use `fftshift`.
For `n` even, ``A[n/2]`` contains the sum of the positive and
negative-frequency terms. For `n` even and `x` real, ``A[n/2]`` will
always be real.
This function is most efficient when `n` is a power of two, and least
efficient when `n` is prime.
If the data type of `x` is real, a "real FFT" algorithm is automatically
used, which roughly halves the computation time. To increase efficiency
a little further, use `rfft`, which does the same calculation, but only
outputs half of the symmetrical spectrum. If the data is both real and
symmetrical, the `dct` can again double the efficiency, by generating
half of the spectrum from half of the signal.
Examples
--------
>>> from scipy.fftpack import fft, ifft
>>> x = np.arange(5)
>>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy.
True
"""
tmp = _asfarray(x)
try:
work_function = _DTYPE_TO_FFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
overwrite_x = 1
overwrite_x = overwrite_x or _datacopied(tmp, x)
if n is None:
n = tmp.shape[axis]
elif n != tmp.shape[axis]:
tmp, copy_made = _fix_shape(tmp,n,axis)
overwrite_x = overwrite_x or copy_made
if n < 1:
raise ValueError("Invalid number of FFT data points "
"(%d) specified." % n)
if axis == -1 or axis == len(tmp.shape) - 1:
return work_function(tmp,n,1,0,overwrite_x)
tmp = swapaxes(tmp, axis, -1)
tmp = work_function(tmp,n,1,0,overwrite_x)
return swapaxes(tmp, axis, -1)
def ifft(x, n=None, axis=-1, overwrite_x=False):
"""
Return discrete inverse Fourier transform of real or complex sequence.
The returned complex array contains ``y(0), y(1),..., y(n-1)`` where
``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
Parameters
----------
x : array_like
Transformed data to invert.
n : int, optional
Length of the inverse Fourier transform. If ``n < x.shape[axis]``,
`x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded.
The default results in ``n = x.shape[axis]``.
axis : int, optional
Axis along which the ifft's are computed; the default is over the
last axis (i.e., ``axis=-1``).
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
ifft : ndarray of floats
The inverse discrete Fourier transform.
See Also
--------
fft : Forward FFT
Notes
-----
This function is most efficient when `n` is a power of two, and least
efficient when `n` is prime.
If the data type of `x` is real, a "real IFFT" algorithm is automatically
used, which roughly halves the computation time.
"""
tmp = _asfarray(x)
try:
work_function = _DTYPE_TO_FFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
overwrite_x = 1
overwrite_x = overwrite_x or _datacopied(tmp, x)
if n is None:
n = tmp.shape[axis]
elif n != tmp.shape[axis]:
tmp, copy_made = _fix_shape(tmp,n,axis)
overwrite_x = overwrite_x or copy_made
if n < 1:
raise ValueError("Invalid number of FFT data points "
"(%d) specified." % n)
if axis == -1 or axis == len(tmp.shape) - 1:
return work_function(tmp,n,-1,1,overwrite_x)
tmp = swapaxes(tmp, axis, -1)
tmp = work_function(tmp,n,-1,1,overwrite_x)
return swapaxes(tmp, axis, -1)
def rfft(x, n=None, axis=-1, overwrite_x=False):
"""
Discrete Fourier transform of a real sequence.
Parameters
----------
x : array_like, real-valued
The data to transform.
n : int, optional
Defines the length of the Fourier transform. If `n` is not specified
(the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``,
`x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
axis : int, optional
The axis along which the transform is applied. The default is the
last axis.
overwrite_x : bool, optional
If set to true, the contents of `x` can be overwritten. Default is
False.
Returns
-------
z : real ndarray
The returned real array contains::
[y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] if n is even
[y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] if n is odd
where::
y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
j = 0..n-1
Note that ``y(-j) == y(n-j).conjugate()``.
See Also
--------
fft, irfft, scipy.fftpack.basic
Notes
-----
Within numerical accuracy, ``y == rfft(irfft(y))``.
Examples
--------
>>> from scipy.fftpack import fft, rfft
>>> a = [9, -9, 1, 3]
>>> fft(a)
array([ 4. +0.j, 8.+12.j, 16. +0.j, 8.-12.j])
>>> rfft(a)
array([ 4., 8., 12., 16.])
"""
tmp = _asfarray(x)
if not numpy.isrealobj(tmp):
raise TypeError("1st argument must be real sequence")
try:
work_function = _DTYPE_TO_RFFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
overwrite_x = overwrite_x or _datacopied(tmp, x)
return _raw_fft(tmp,n,axis,1,overwrite_x,work_function)
def irfft(x, n=None, axis=-1, overwrite_x=False):
"""
Return inverse discrete Fourier transform of real sequence x.
The contents of `x` are interpreted as the output of the `rfft`
function.
Parameters
----------
x : array_like
Transformed data to invert.
n : int, optional
Length of the inverse Fourier transform.
If n < x.shape[axis], x is truncated.
If n > x.shape[axis], x is zero-padded.
The default results in n = x.shape[axis].
axis : int, optional
Axis along which the ifft's are computed; the default is over
the last axis (i.e., axis=-1).
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
irfft : ndarray of floats
The inverse discrete Fourier transform.
See Also
--------
rfft, ifft
Notes
-----
The returned real array contains::
[y(0),y(1),...,y(n-1)]
where for n is even::
y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
* exp(sqrt(-1)*j*k* 2*pi/n)
+ c.c. + x[0] + (-1)**(j) x[n-1])
and for n is odd::
y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
* exp(sqrt(-1)*j*k* 2*pi/n)
+ c.c. + x[0])
c.c. denotes complex conjugate of preceding expression.
For details on input parameters, see `rfft`.
"""
tmp = _asfarray(x)
if not numpy.isrealobj(tmp):
raise TypeError("1st argument must be real sequence")
try:
work_function = _DTYPE_TO_RFFT[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
overwrite_x = overwrite_x or _datacopied(tmp, x)
return _raw_fft(tmp,n,axis,-1,overwrite_x,work_function)
def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
""" Internal auxiliary function for fftnd, ifftnd."""
if s is None:
if axes is None:
s = x.shape
else:
s = numpy.take(x.shape, axes)
s = tuple(s)
if axes is None:
noaxes = True
axes = list(range(-x.ndim, 0))
else:
noaxes = False
if len(axes) != len(s):
raise ValueError("when given, axes and shape arguments "
"have to be of the same length")
for dim in s:
if dim < 1:
raise ValueError("Invalid number of FFT data points "
"(%s) specified." % (s,))
# No need to swap axes, array is in C order
if noaxes:
for i in axes:
x, copy_made = _fix_shape(x, s[i], i)
overwrite_x = overwrite_x or copy_made
return work_function(x,s,direction,overwrite_x=overwrite_x)
# We ordered axes, because the code below to push axes at the end of the
# array assumes axes argument is in ascending order.
id = numpy.argsort(axes)
axes = [axes[i] for i in id]
s = [s[i] for i in id]
# Swap the request axes, last first (i.e. First swap the axis which ends up
# at -1, then at -2, etc...), such as the request axes on which the
# operation is carried become the last ones
for i in range(1, len(axes)+1):
x = numpy.swapaxes(x, axes[-i], -i)
# We can now operate on the axes waxes, the p last axes (p = len(axes)), by
# fixing the shape of the input array to 1 for any axis the fft is not
# carried upon.
waxes = list(range(x.ndim - len(axes), x.ndim))
shape = numpy.ones(x.ndim)
shape[waxes] = s
for i in range(len(waxes)):
x, copy_made = _fix_shape(x, s[i], waxes[i])
overwrite_x = overwrite_x or copy_made
r = work_function(x, shape, direction, overwrite_x=overwrite_x)
# reswap in the reverse order (first axis first, etc...) to get original
# order
for i in range(len(axes), 0, -1):
r = numpy.swapaxes(r, -i, axes[-i])
return r
def fftn(x, shape=None, axes=None, overwrite_x=False):
"""
Return multidimensional discrete Fourier transform.
The returned array contains::
y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
where d = len(x.shape) and n = x.shape.
Note that ``y[..., -j_i, ...] = y[..., n_i-j_i, ...].conjugate()``.
Parameters
----------
x : array_like
The (n-dimensional) array to transform.
shape : tuple of ints, optional
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
If ``shape[i] > x.shape[i]``, the i-th dimension is padded with zeros.
If ``shape[i] < x.shape[i]``, the i-th dimension is truncated to
length ``shape[i]``.
axes : array_like of ints, optional
The axes of `x` (`y` if `shape` is not None) along which the
transform is applied.
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed. Default is False.
Returns
-------
y : complex-valued n-dimensional numpy array
The (n-dimensional) DFT of the input array.
See Also
--------
ifftn
Examples
--------
>>> from scipy.fftpack import fftn, ifftn
>>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
>>> np.allclose(y, fftn(ifftn(y)))
True
"""
return _raw_fftn_dispatch(x, shape, axes, overwrite_x, 1)
def _raw_fftn_dispatch(x, shape, axes, overwrite_x, direction):
tmp = _asfarray(x)
try:
work_function = _DTYPE_TO_FFTN[tmp.dtype]
except KeyError:
raise ValueError("type %s is not supported" % tmp.dtype)
if not (istype(tmp, numpy.complex64) or istype(tmp, numpy.complex128)):
overwrite_x = 1
overwrite_x = overwrite_x or _datacopied(tmp, x)
return _raw_fftnd(tmp,shape,axes,direction,overwrite_x,work_function)
def ifftn(x, shape=None, axes=None, overwrite_x=False):
"""
Return inverse multi-dimensional discrete Fourier transform of
arbitrary type sequence x.
The returned array contains::
y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)
where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.
For description of parameters see `fftn`.
See Also
--------
fftn : for detailed information.
"""
return _raw_fftn_dispatch(x, shape, axes, overwrite_x, -1)
def fft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
"""
2-D discrete Fourier transform.
Return the two-dimensional discrete Fourier transform of the 2-D argument
`x`.
See Also
--------
fftn : for detailed information.
"""
return fftn(x,shape,axes,overwrite_x)
def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
"""
2-D discrete inverse Fourier transform of real or complex sequence.
Return inverse two-dimensional discrete Fourier transform of
arbitrary type sequence x.
See `ifft` for more information.
See also
--------
fft2, ifft
"""
return ifftn(x,shape,axes,overwrite_x)
|
rmoorman/feedhq | refs/heads/master | feedhq/feeds/utils.py | 2 | # -*- coding: utf-8 -*-
import datetime
from django.utils import timezone
from rache import job_key, job_details
from .. import __version__
from ..utils import get_redis_connection
USER_AGENT = (
'FeedHQ/%s (https://github.com/feedhq/feedhq; %%s; https://github.com/'
'feedhq/feedhq/wiki/fetcher; like FeedFetcher-Google)'
) % __version__
FAVICON_FETCHER = USER_AGENT % 'favicon fetcher'
def is_feed(parsed):
return hasattr(parsed.feed, 'title')
def epoch_to_utc(value):
"""Converts epoch (in seconds) values to a timezone-aware datetime."""
return timezone.make_aware(
datetime.datetime.fromtimestamp(value), timezone.utc)
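# Note (illustrative caveat): datetime.fromtimestamp() interprets `value`
# in the process's local timezone before UTC is attached, so this helper
# assumes the service runs with its timezone set to UTC.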
class JobNotFound(Exception):
pass
def get_job(name):
redis = get_redis_connection()
key = job_key(name)
if not redis.exists(key):
raise JobNotFound
return job_details(name, connection=redis)
|
tjackiw/zaproxy | refs/heads/develop | python/api/src/zapv2/authentication.py | 15 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2015 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class authentication(object):
def __init__(self, zap):
self.zap = zap
@property
def get_supported_authentication_methods(self):
return next(self.zap._request(self.zap.base + 'authentication/view/getSupportedAuthenticationMethods/').itervalues())
def get_authentication_method_config_params(self, authmethodname):
return next(self.zap._request(self.zap.base + 'authentication/view/getAuthenticationMethodConfigParams/', {'authMethodName' : authmethodname}).itervalues())
def get_authentication_method(self, contextid):
return next(self.zap._request(self.zap.base + 'authentication/view/getAuthenticationMethod/', {'contextId' : contextid}).itervalues())
def get_logged_in_indicator(self, contextid):
return next(self.zap._request(self.zap.base + 'authentication/view/getLoggedInIndicator/', {'contextId' : contextid}).itervalues())
def get_logged_out_indicator(self, contextid):
return next(self.zap._request(self.zap.base + 'authentication/view/getLoggedOutIndicator/', {'contextId' : contextid}).itervalues())
def set_authentication_method(self, contextid, authmethodname, authmethodconfigparams='', apikey=''):
return next(self.zap._request(self.zap.base + 'authentication/action/setAuthenticationMethod/', {'contextId' : contextid, 'authMethodName' : authmethodname, 'authMethodConfigParams' : authmethodconfigparams, 'apikey' : apikey}).itervalues())
def set_logged_in_indicator(self, contextid, loggedinindicatorregex, apikey=''):
return next(self.zap._request(self.zap.base + 'authentication/action/setLoggedInIndicator/', {'contextId' : contextid, 'loggedInIndicatorRegex' : loggedinindicatorregex, 'apikey' : apikey}).itervalues())
def set_logged_out_indicator(self, contextid, loggedoutindicatorregex, apikey=''):
return next(self.zap._request(self.zap.base + 'authentication/action/setLoggedOutIndicator/', {'contextId' : contextid, 'loggedOutIndicatorRegex' : loggedoutindicatorregex, 'apikey' : apikey}).itervalues())
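# Illustrative use, assuming a running ZAP instance and the zapv2 client:
#   zap = ZAPv2()
#   print zap.authentication.get_supported_authentication_methods
#   zap.authentication.set_logged_in_indicator('1', 'Welcome back',
#                                              apikey='changeme')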
|
wang1986one/pyo | refs/heads/master | pyolib/server.py | 11 | # -*- coding: utf-8 -*-
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
import os, time
from _core import *
from _widgets import createServerGUI
######################################################################
### Proxy of Server object
######################################################################
class Server(object):
"""
Main processing audio loop callback handler.
The Server object handles all communications with Portaudio and
Portmidi. It keeps track of all audio streams created as well as
connections between them.
An instance of the Server must be booted before defining any
signal processing chain.
:Args:
sr : int, optional
Sampling rate used by Portaudio and the Server to compute samples.
Defaults to 44100.
nchnls : int, optional
Number of output channels. The number of input channels will be the
same if `ichnls` argument is not defined. Defaults to 2.
buffersize : int, optional
Number of samples that Portaudio will request from the callback loop.
Defaults to 256.
This value has an impact on CPU use (a small buffer size is harder
to compute) and on the latency of the system.
Latency is `buffer size / sampling rate` in seconds.
duplex : int {0, 1}, optional
Input - output mode. 0 is output only and 1 is both ways.
Defaults to 1.
        audio : string {'portaudio', 'pa', 'jack', 'coreaudio', 'offline', 'offline_nb'}, optional
            Audio backend to use. 'pa' is equivalent to 'portaudio'. Default is 'portaudio'.
            'offline' saves the audio output in a soundfile as fast as possible in blocking mode,
            i.e. the main program doesn't respond until the end of the computation.
            'offline_nb' saves the audio output in a soundfile as fast as possible in non-blocking
            mode, i.e. the computation is executed in a separate thread, allowing the program to
            respond while the computation goes on.
            It is the responsibility of the user to make sure that the program doesn't exit before
            the computation is done.
jackname : string, optional
Name of jack client. Defaults to 'pyo'
ichnls : int, optional
            Number of input channels if different from the number of output
            channels. If None (default), ichnls = nchnls.
.. note::
The following methods must be called **before** booting the server
- setInOutDevice(x) : Set both input and output devices. See `pa_list_devices()`.
- setInputDevice(x) : Set the audio input device number. See `pa_list_devices()`.
- setOutputDevice(x) : Set the audio output device number. See `pa_list_devices()`.
- setInputOffset(x) : Set the first physical input channel.
- setOutputOffset(x) : Set the first physical output channel.
- setInOutOffset(x) : Set the first physical input and output channels.
- setMidiInputDevice(x) : Set the MIDI input device number. See `pm_list_devices()`.
- setMidiOutputDevice(x) : Set the MIDI output device number. See `pm_list_devices()`.
- setSamplingRate(x) : Set the sampling rate used by the server.
- setBufferSize(x) : Set the buffer size used by the server.
- setNchnls(x) : Set the number of output (and input if `ichnls` = None) channels used by the server.
        - setIchnls(x) : Set the number of input channels (if different from the number of output channels) used by the server.
- setDuplex(x) : Set the duplex mode used by the server.
- setVerbosity(x) : Set the server's verbosity.
- reinit(sr, nchnls, buffersize, duplex, audio, jackname) : Reinit the server's settings.
>>> # For an 8 channels server in duplex mode with
>>> # a sampling rate of 48000 Hz and buffer size of 512
>>> s = Server(sr=48000, nchnls=8, buffersize=512, duplex=1).boot()
>>> s.start()
"""
def __init__(self, sr=44100, nchnls=2, buffersize=256, duplex=1,
audio='portaudio', jackname='pyo', ichnls=None):
if os.environ.has_key("PYO_SERVER_AUDIO") and "offline" not in audio and "embedded" not in audio:
audio = os.environ["PYO_SERVER_AUDIO"]
self._time = time
self._nchnls = nchnls
if ichnls == None:
self._ichnls = nchnls
else:
self._ichnls = ichnls
self._amp = 1.
self._verbosity = 7
self._startoffset = 0
self._dur = -1
self._filename = None
self._fileformat = 0
self._sampletype = 0
self._server = Server_base(sr, nchnls, buffersize, duplex, audio, jackname, self._ichnls)
self._server._setDefaultRecPath(os.path.join(os.path.expanduser("~"), "pyo_rec.wav"))
def __del__(self):
self.setTime = None
self.setRms = None
if self.getIsBooted():
if self.getIsStarted():
self.stop()
self._time.sleep(.25)
self.shutdown()
self._time.sleep(.25)
def reinit(self, sr=44100, nchnls=2, buffersize=256, duplex=1,
audio='portaudio', jackname='pyo', ichnls=None):
"""
        Reinit the server's settings. Useful to alternate between real-time and offline servers.
:Args:
Same as in the __init__ method.
"""
self._nchnls = nchnls
        if ichnls is None:
self._ichnls = nchnls
else:
self._ichnls = ichnls
self._amp = 1.
self._verbosity = 7
self._startoffset = 0
self._dur = -1
self._filename = None
self._fileformat = 0
self._sampletype = 0
self._globalseed = 0
self._server.__init__(sr, nchnls, buffersize, duplex, audio, jackname, self._ichnls)
def gui(self, locals=None, meter=True, timer=True, exit=True):
"""
Show the server's user interface.
:Args:
locals : locals namespace {locals(), None}, optional
If locals() is given, the interface will show an interpreter extension,
giving a way to interact with the running script. Defaults to None.
            meter : boolean, optional
If True, the interface will show a vumeter of the global output signal.
Defaults to True.
timer : boolean, optional
If True, the interface will show a clock of the current time.
Defaults to True.
exit : boolean, optional
                If True, the python interpreter will exit when the 'Quit' button is
                pressed; otherwise, the GUI will be closed, leaving the interpreter alive.
Defaults to True.
"""
f, win = createServerGUI(self._nchnls, self.start, self.stop, self.recstart, self.recstop,
self.setAmp, self.getIsStarted(), locals, self.shutdown, meter, timer, self._amp, exit)
if meter:
self._server.setAmpCallable(f)
if timer:
self._server.setTimeCallable(f)
        try:
            win.mainloop()
        except:
            # fall back to wx, whose frames use MainLoop() instead of mainloop()
            if win is not None:
                win.MainLoop()
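    # Typical usage (sketch): calling gui(locals()) at the end of a script
    # keeps it alive and exposes start/stop/record controls, plus an
    # interpreter extension when locals() is passed.
    #
    #   s = Server().boot()
    #   s.gui(locals())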
def setTimeCallable(self, func):
"""
Set a function callback that will receive the current time as argument.
The function will receive four integers in this format:
hours, minutes, seconds, milliseconds
:Args:
func : python callable
Python function or method to call with current time as argument.
"""
self.setTime = func
self._server.setTimeCallable(self)
def setMeterCallable(self, func):
"""
Set a function callback that will receive the current rms values as argument.
The function will receive a list containing the rms value for each audio channel.
:Args:
func : python callable
Python function or method to call with current rms values as argument.
"""
self.setRms = func
self._server.setAmpCallable(self)
def setMeter(self, meter):
"""
Registers a meter object to the server.
The object must have a method named `setRms`. This method will be called
with the rms values of each audio channel as argument.
:Args:
meter : python object
Python object with a `setRms` method.
"""
self._server.setAmpCallable(meter)
def setInOutDevice(self, x):
"""
Set both input and output audio devices. See `pa_list_devices()`.
:Args:
x : int
Number of the audio input and output devices.
"""
self._server.setInOutDevice(x)
def setInputDevice(self, x):
"""
Set the audio input device number. See `pa_list_devices()`.
:Args:
x : int
Number of the audio device listed by Portaudio.
"""
self._server.setInputDevice(x)
def setOutputDevice(self, x):
"""
Set the audio output device number. See `pa_list_devices()`.
:Args:
x : int
Number of the audio device listed by Portaudio.
"""
self._server.setOutputDevice(x)
def setInputOffset(self, x):
"""
Set the first physical input channel.
Channel number `x` from the soundcard will be assigned to
server's channel one, channel number `x` + 1 to server's
channel two and so on.
:Args:
x : int
Channel number.
"""
self._server.setInputOffset(x)
def setOutputOffset(self, x):
"""
Set the first physical output channel.
Server's channel one will be assigned to soundcard's channel
number `x`, server's channel two will be assigned to soundcard's
channel number `x` + 1 and so on.
:Args:
x : int
Channel number.
"""
self._server.setOutputOffset(x)
def setInOutOffset(self, x):
"""
Set the first physical input and output channels.
Set both offsets to the same value. See `setInputOffset` and
`setOutputOffset` documentation for more details.
:Args:
x : int
Channel number.
"""
self._server.setInputOffset(x)
self._server.setOutputOffset(x)
def setMidiInputDevice(self, x):
"""
Set the Midi input device number. See `pm_list_devices()`.
        A number greater than the highest portmidi device index
        will open all available input devices.
:Args:
x : int
Number of the Midi device listed by Portmidi.
"""
self._server.setMidiInputDevice(x)
def setMidiOutputDevice(self, x):
"""
Set the Midi output device number. See `pm_list_devices()`.
:Args:
x : int
Number of the Midi device listed by Portmidi.
"""
self._server.setMidiOutputDevice(x)
def setSamplingRate(self, x):
"""
Set the sampling rate used by the server.
:Args:
x : int
New sampling rate, must be supported by the soundcard.
"""
self._server.setSamplingRate(x)
def setBufferSize(self, x):
"""
Set the buffer size used by the server.
:Args:
x : int
New buffer size.
"""
self._server.setBufferSize(x)
def setNchnls(self, x):
"""
Set the number of output (and input if `ichnls` = None) channels used by the server.
:Args:
x : int
New number of channels.
"""
self._nchnls = x
self._server.setNchnls(x)
def setIchnls(self, x):
"""
        Set the number of input channels (if different from the number of output channels) used by the server.
:Args:
x : int
New number of input channels.
"""
self._ichnls = x
self._server.setIchnls(x)
def setDuplex(self, x):
"""
Set the duplex mode used by the server.
:Args:
x : int {0 or 1}
New mode. 0 is output only, 1 is both ways.
"""
self._server.setDuplex(x)
def setVerbosity(self, x):
"""
Set the server's verbosity.
:Args:
x : int
A sum of values to display different levels:
- 1 = error
- 2 = message
- 4 = warning
- 8 = debug
"""
self._verbosity = x
self._server.setVerbosity(x)
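    # Example (sketch): the verbosity value is a sum of level flags, so
    # several levels can be enabled at once.
    #
    #   s.setVerbosity(1 + 2 + 4)   # errors, messages and warnings
    #                               # (= 7, the default set in __init__)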
def setJackAuto(self, xin=True, xout=True):
"""
Tells the server to auto-connect (or not) Jack ports to System ports.
:Args:
xin : boolean
Input Auto-connection switch. True is enabled (default) and False is disabled.
xout : boolean
Output Auto-connection switch. True is enabled (default) and False is disabled.
"""
self._server.setJackAuto(xin, xout)
def setJackAutoConnectInputPorts(self, ports):
"""
Tells the server to auto-connect Jack input ports to pre-defined Jack ports.
:Args:
ports : string or list of strings
Name of the Jack port(s) to auto-connect. Regular Expressions are allowed.
"""
ports, lmax = convertArgsToLists(ports)
self._server.setJackAutoConnectInputPorts(ports)
def setJackAutoConnectOutputPorts(self, ports):
"""
Tells the server to auto-connect Jack output ports to pre-defined Jack ports.
:Args:
ports : string or list of strings
Name of the Jack port(s) to auto-connect. Regular Expressions are allowed.
"""
ports, lmax = convertArgsToLists(ports)
self._server.setJackAutoConnectOutputPorts(ports)
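    # Example (sketch): the port names below are the usual JACK system
    # ports, but they vary with the setup; regular expressions are allowed.
    #
    #   s.setJackAutoConnectInputPorts(['system:capture_1'])
    #   s.setJackAutoConnectOutputPorts(['system:playback_1', 'system:playback_2'])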
def setGlobalSeed(self, x):
"""
Set the server's global seed used by random objects.
:Args:
x : int
A positive integer that will be used as the seed by random objects.
If zero, randoms will be seeded with the system clock current value.
"""
self._globalseed = x
self._server.setGlobalSeed(x)
def setStartOffset(self, x):
"""
Set the server's starting time offset. First `x` seconds will be rendered
offline as fast as possible.
:Args:
x : float
Starting time of the real-time processing.
"""
self._startoffset = x
self._server.setStartOffset(x)
def setAmp(self, x):
"""
Set the overall amplitude.
:Args:
x : float
New amplitude.
"""
self._amp = x
self._server.setAmp(x)
def shutdown(self):
"""
        Shut down and clear the server. This method will erase all objects
        from the callback loop. It needs to be called before changing
        server parameters like `samplingrate`, `buffersize`, `nchnls`, ...
"""
self._server.shutdown()
def boot(self, newBuffer=True):
"""
Boot the server. Must be called before defining any signal processing
chain. Server's parameters like `samplingrate`, `buffersize` or
`nchnls` will be effective after a call to this method.
:Args:
            newBuffer : bool
                Specify whether the buffers need to be allocated or not. Useful
                to limit the allocation of new buffers when the buffer size hasn't
                changed, which in turn limits calls to the Python interpreter to
                get the buffer addresses when using Pyo inside a C/C++ application
                with the embedded server. Defaults to True.
"""
self._server.boot(newBuffer)
return self
def start(self):
"""
Start the audio callback loop and begin processing.
"""
self._server.start()
return self
def stop(self):
"""
Stop the audio callback loop.
"""
self._server.stop()
def recordOptions(self, dur=-1, filename=None, fileformat=0, sampletype=0):
"""
        Sets the options for the sound file created by offline rendering or global recording.
:Args:
dur : float
Duration, in seconds, of the recorded file. Only used by
offline rendering. Must be positive. Defaults to -1.
filename : string
Full path of the file to create. If None, a file called
`pyo_rec.wav` will be created in the user's home directory.
Defaults to None.
fileformat : int, optional
Format type of the audio file. This function will first try to
set the format from the filename extension.
If it's not possible, it uses the fileformat parameter. Supported formats are:
0. WAV - Microsoft WAV format (little endian) {.wav, .wave} (default)
1. AIFF - Apple/SGI AIFF format (big endian) {.aif, .aiff}
2. AU - Sun/NeXT AU format (big endian) {.au}
3. RAW - RAW PCM data {no extension}
4. SD2 - Sound Designer 2 {.sd2}
5. FLAC - FLAC lossless file format {.flac}
6. CAF - Core Audio File format {.caf}
7. OGG - Xiph OGG container {.ogg}
sampletype : int, optional
Bit depth encoding of the audio file.
SD2 and FLAC only support 16 or 24 bit int. Supported types are:
0. 16 bits int (default)
1. 24 bits int
2. 32 bits int
3. 32 bits float
4. 64 bits float
5. U-Law encoded
6. A-Law encoded
"""
self._dur = dur
        if filename is None:
filename = os.path.join(os.path.expanduser("~"), "pyo_rec.wav")
self._filename = filename
ext = filename.rsplit('.')
if len(ext) >= 2:
ext = ext[-1].lower()
            if ext in FILE_FORMATS:
fileformat = FILE_FORMATS[ext]
else:
print 'Warning: Unknown file extension. Using fileformat value.'
else:
print 'Warning: Filename has no extension. Using fileformat value.'
self._fileformat = fileformat
self._sampletype = sampletype
self._server.recordOptions(dur, filename, fileformat, sampletype)
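    # Offline rendering sketch, assuming an audio source `src` is defined
    # (Noise is a pyo generator). In 'offline' mode, start() blocks until
    # `dur` seconds have been rendered to the file.
    #
    #   s = Server(audio='offline').boot()
    #   s.recordOptions(dur=10, filename='/tmp/render.wav')
    #   src = Noise(mul=0.3).out()
    #   s.start()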
def recstart(self, filename=None):
"""
Begins a default recording of the sound that is sent to the
soundcard. This will create a file called `pyo_rec.wav` in
the user's home directory if no path is supplied or defined
with recordOptions method. Uses file format and sample type
defined with recordOptions method.
:Args:
filename : string, optional
Name of the file to be created. Defaults to None.
"""
        if filename is None:
            if self._filename is not None:
filename = self._filename
else:
filename = os.path.join(os.path.expanduser("~"), "pyo_rec.wav")
ext = filename.rsplit('.')
if len(ext) >= 2:
ext = ext[-1].lower()
            if ext in FILE_FORMATS:
fileformat = FILE_FORMATS[ext]
if fileformat != self._fileformat:
self._fileformat = fileformat
self._server.recordOptions(self._dur, filename, self._fileformat, self._sampletype)
self._server.recstart(filename)
def recstop(self):
"""
Stop the previously started recording.
"""
self._server.recstop()
def noteout(self, pitch, velocity, channel=0, timestamp=0):
"""
Send a MIDI note message to the selected midi output device.
        Arguments can be lists of values to generate multiple events
in one call.
:Args:
pitch : int
Midi pitch, between 0 and 127.
velocity : int
Amplitude of the note, between 0 and 127. A note
with a velocity of 0 is equivalent to a note off.
channel : int, optional
The Midi channel, between 1 and 16, on which the
note is sent. A channel of 0 means all channels.
Defaults to 0.
timestamp : int, optional
The delay time, in milliseconds, before the note
is sent on the portmidi stream. A value of 0 means
to play the note now. Defaults to 0.
"""
pitch, velocity, channel, timestamp, lmax = convertArgsToLists(pitch, velocity, channel, timestamp)
[self._server.noteout(wrap(pitch,i), wrap(velocity,i), wrap(channel,i), wrap(timestamp,i)) for i in range(lmax)]
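    # Example (sketch): list expansion sends several MIDI events in one
    # call, and a velocity of 0 acts as a note off, as documented above.
    #
    #   s.noteout([60, 64, 67], 100)   # play a C major triad
    #   s.noteout([60, 64, 67], 0)     # release it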
def afterout(self, pitch, velocity, channel=0, timestamp=0):
"""
Send an aftertouch message to the selected midi output device.
        Arguments can be lists of values to generate multiple events
in one call.
:Args:
pitch : int
Midi key pressed down, between 0 and 127.
velocity : int
Velocity of the pressure, between 0 and 127.
channel : int, optional
The Midi channel, between 1 and 16, on which the
note is sent. A channel of 0 means all channels.
Defaults to 0.
timestamp : int, optional
The delay time, in milliseconds, before the note
is sent on the portmidi stream. A value of 0 means
to play the note now. Defaults to 0.
"""
pitch, velocity, channel, timestamp, lmax = convertArgsToLists(pitch, velocity, channel, timestamp)
[self._server.afterout(wrap(pitch,i), wrap(velocity,i), wrap(channel,i), wrap(timestamp,i)) for i in range(lmax)]
def ctlout(self, ctlnum, value, channel=0, timestamp=0):
"""
Send a control change message to the selected midi output device.
        Arguments can be lists of values to generate multiple events
in one call.
:Args:
ctlnum : int
Controller number, between 0 and 127.
value : int
Value of the controller, between 0 and 127.
channel : int, optional
The Midi channel, between 1 and 16, on which the
message is sent. A channel of 0 means all channels.
Defaults to 0.
timestamp : int, optional
The delay time, in milliseconds, before the message
is sent on the portmidi stream. A value of 0 means
to play the message now. Defaults to 0.
"""
ctlnum, value, channel, timestamp, lmax = convertArgsToLists(ctlnum, value, channel, timestamp)
[self._server.ctlout(wrap(ctlnum,i), wrap(value,i), wrap(channel,i), wrap(timestamp,i)) for i in range(lmax)]
def programout(self, value, channel=0, timestamp=0):
"""
Send a program change message to the selected midi output device.
        Arguments can be lists of values to generate multiple events
in one call.
:Args:
value : int
New program number, between 0 and 127.
channel : int, optional
The Midi channel, between 1 and 16, on which the
message is sent. A channel of 0 means all channels.
Defaults to 0.
timestamp : int, optional
The delay time, in milliseconds, before the message
is sent on the portmidi stream. A value of 0 means
to play the message now. Defaults to 0.
"""
value, channel, timestamp, lmax = convertArgsToLists(value, channel, timestamp)
[self._server.programout(wrap(value,i), wrap(channel,i), wrap(timestamp,i)) for i in range(lmax)]
def pressout(self, value, channel=0, timestamp=0):
"""
Send a channel pressure message to the selected midi output device.
        Arguments can be lists of values to generate multiple events
in one call.
:Args:
value : int
Single greatest pressure value, between 0 and 127.
channel : int, optional
The Midi channel, between 1 and 16, on which the
message is sent. A channel of 0 means all channels.
Defaults to 0.
timestamp : int, optional
The delay time, in milliseconds, before the message
is sent on the portmidi stream. A value of 0 means
to play the message now. Defaults to 0.
"""
value, channel, timestamp, lmax = convertArgsToLists(value, channel, timestamp)
[self._server.pressout(wrap(value,i), wrap(channel,i), wrap(timestamp,i)) for i in range(lmax)]
def bendout(self, value, channel=0, timestamp=0):
"""
Send a pitch bend message to the selected midi output device.
        Arguments can be lists of values to generate multiple events
in one call.
:Args:
value : int
                14-bit pitch bend value: 8192 means no bending, 0 is
                full down and 16383 is full up.
channel : int, optional
The Midi channel, between 1 and 16, on which the
message is sent. A channel of 0 means all channels.
Defaults to 0.
timestamp : int, optional
The delay time, in milliseconds, before the message
is sent on the portmidi stream. A value of 0 means
to play the message now. Defaults to 0.
"""
value, channel, timestamp, lmax = convertArgsToLists(value, channel, timestamp)
[self._server.bendout(wrap(value,i), wrap(channel,i), wrap(timestamp,i)) for i in range(lmax)]
def getSamplingRate(self):
"""
Return the current sampling rate.
"""
return self._server.getSamplingRate()
def getNchnls(self):
"""
Return the current number of channels.
"""
return self._server.getNchnls()
def getBufferSize(self):
"""
Return the current buffer size.
"""
return self._server.getBufferSize()
def getGlobalSeed(self):
"""
Return the current global seed.
"""
return self._server.getGlobalSeed()
def getIsStarted(self):
"""
Returns 1 if the server is started, otherwise returns 0.
"""
return self._server.getIsStarted()
def getIsBooted(self):
"""
Returns 1 if the server is booted, otherwise returns 0.
"""
return self._server.getIsBooted()
def getMidiActive(self):
"""
Returns 1 if Midi callback is active, otherwise returns 0.
"""
return self._server.getMidiActive()
def getStreams(self):
"""
Returns the list of Stream objects currently in the Server memory.
"""
return self._server.getStreams()
def getNumberOfStreams(self):
"""
Returns the number of streams currently in the Server memory.
"""
return len(self._server.getStreams())
def setServer(self):
"""
        Sets this server as the one to use for new objects when using the embedded device.
"""
return self._server.setServer()
def getInputAddr(self):
"""
Return the address of the input buffer
"""
return self._server.getInputAddr()
def getOutputAddr(self):
"""
Return the address of the output buffer
"""
return self._server.getOutputAddr()
def getServerID(self):
"""
Return the server ID
"""
return self._server.getServerID()
def getServerAddr(self):
"""
Return the address of the server
"""
return self._server.getServerAddr()
def getEmbedICallbackAddr(self):
"""
Return the address of the interleaved embedded callback function
"""
return self._server.getEmbedICallbackAddr()
@property
def amp(self):
"""float. Overall amplitude."""
return self._amp
@amp.setter
def amp(self, x): self.setAmp(x)
@property
def startoffset(self):
"""float. Starting time of the real-time processing."""
return self._startoffset
@startoffset.setter
def startoffset(self, x): self.setStartOffset(x)
@property
def verbosity(self):
"""int. Server verbosity."""
return self._verbosity
@verbosity.setter
def verbosity(self, x):
        if isinstance(x, int):
self.setVerbosity(x)
else:
raise Exception("verbosity must be an integer")
@property
def globalseed(self):
"""int. Server global seed."""
return self._globalseed
@globalseed.setter
def globalseed(self, x):
        if isinstance(x, int):
self.setGlobalSeed(x)
else:
raise Exception("global seed must be an integer") |
Jorge-Rodriguez/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/rds_instance_facts.py | 27 | #!/usr/bin/python
# Copyright (c) 2017, 2018 Michael De La Rue
# Copyright (c) 2017, 2018 Will Thames
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: rds_instance_facts
version_added: "2.6"
short_description: obtain facts about one or more RDS instances
description:
- obtain facts about one or more RDS instances
options:
db_instance_identifier:
description:
- The RDS instance's unique identifier.
required: false
aliases:
- id
filters:
description:
- A filter that specifies one or more DB instances to describe.
See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html)
requirements:
- "python >= 2.7"
- "boto3"
author:
- "Will Thames (@willthames)"
- "Michael De La Rue (@mikedlr)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Get facts about an instance
- rds_instance_facts:
db_instance_identifier: new-database
register: new_database_facts
# Get all RDS instances
- rds_instance_facts:
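
# Get facts about instances matching a filter (db-instance-id is one of
# the filter names accepted by the DescribeDBInstances API; shown as an
# illustration)
- rds_instance_facts:
    filters:
      db-instance-id: new-database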
'''
RETURN = '''
instances:
description: List of RDS instances
returned: always
type: complex
contains:
allocated_storage:
description: Gigabytes of storage allocated to the database
returned: always
type: int
sample: 10
auto_minor_version_upgrade:
description: Whether minor version upgrades happen automatically
returned: always
type: bool
sample: true
availability_zone:
description: Availability Zone in which the database resides
returned: always
type: str
sample: us-west-2b
backup_retention_period:
description: Days for which backups are retained
returned: always
type: int
sample: 7
ca_certificate_identifier:
description: ID for the CA certificate
returned: always
type: str
sample: rds-ca-2015
copy_tags_to_snapshot:
description: Whether DB tags should be copied to the snapshot
returned: always
type: bool
sample: false
db_instance_arn:
description: ARN of the database instance
returned: always
type: str
sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds
db_instance_class:
description: Instance class of the database instance
returned: always
type: str
sample: db.t2.small
db_instance_identifier:
description: Database instance identifier
returned: always
type: str
sample: helloworld-rds
db_instance_port:
description: Port used by the database instance
returned: always
type: int
sample: 0
db_instance_status:
description: Status of the database instance
returned: always
type: str
sample: available
db_name:
description: Name of the database
returned: always
type: str
sample: management
db_parameter_groups:
description: List of database parameter groups
returned: always
type: complex
contains:
db_parameter_group_name:
description: Name of the database parameter group
returned: always
type: str
sample: psql-pg-helloworld
parameter_apply_status:
description: Whether the parameter group has been applied
returned: always
type: str
sample: in-sync
db_security_groups:
description: List of security groups used by the database instance
returned: always
type: list
sample: []
db_subnet_group:
description: list of subnet groups
returned: always
type: complex
contains:
db_subnet_group_description:
description: Description of the DB subnet group
returned: always
type: str
sample: My database subnet group
db_subnet_group_name:
description: Name of the database subnet group
returned: always
type: str
sample: my-subnet-group
subnet_group_status:
description: Subnet group status
returned: always
type: str
sample: Complete
subnets:
description: List of subnets in the subnet group
returned: always
type: complex
contains:
subnet_availability_zone:
description: Availability zone of the subnet
returned: always
type: complex
contains:
name:
description: Name of the availability zone
returned: always
type: str
sample: us-west-2c
subnet_identifier:
description: Subnet ID
returned: always
type: str
sample: subnet-abcd1234
subnet_status:
description: Subnet status
returned: always
type: str
sample: Active
vpc_id:
description: VPC id of the subnet group
returned: always
type: str
sample: vpc-abcd1234
dbi_resource_id:
description: AWS Region-unique, immutable identifier for the DB instance
returned: always
type: str
sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA
domain_memberships:
description: List of domain memberships
returned: always
type: list
sample: []
endpoint:
description: Database endpoint
returned: always
type: complex
contains:
address:
description: Database endpoint address
returned: always
type: str
sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com
hosted_zone_id:
description: Route53 hosted zone ID
returned: always
type: str
sample: Z1PABCD0000000
port:
description: Database endpoint port
returned: always
type: int
sample: 5432
engine:
description: Database engine
returned: always
type: str
sample: postgres
engine_version:
description: Database engine version
returned: always
type: str
sample: 9.5.10
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
instance_create_time:
description: Date and time the instance was created
returned: always
type: str
sample: '2017-10-10T04:00:07.434000+00:00'
kms_key_id:
description: KMS Key ID
returned: always
type: str
sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab
latest_restorable_time:
description: Latest time to which a database can be restored with point-in-time restore
returned: always
type: str
sample: '2018-05-17T00:03:56+00:00'
license_model:
description: License model
returned: always
type: str
sample: postgresql-license
master_username:
description: Database master username
returned: always
type: str
sample: dbadmin
monitoring_interval:
description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance
returned: always
type: int
sample: 0
multi_az:
description: Whether Multi-AZ is on
returned: always
type: bool
sample: false
option_group_memberships:
description: List of option groups
returned: always
type: complex
contains:
option_group_name:
description: Option group name
returned: always
type: str
sample: default:postgres-9-5
status:
description: Status of option group
returned: always
type: str
sample: in-sync
pending_modified_values:
description: Modified values pending application
returned: always
type: complex
contains: {}
performance_insights_enabled:
description: Whether performance insights are enabled
returned: always
type: bool
sample: false
preferred_backup_window:
description: Preferred backup window
returned: always
type: str
sample: 04:00-05:00
preferred_maintenance_window:
description: Preferred maintenance window
returned: always
type: str
sample: mon:05:00-mon:05:30
publicly_accessible:
description: Whether the DB is publicly accessible
returned: always
type: bool
sample: false
read_replica_db_instance_identifiers:
description: List of database instance read replicas
returned: always
type: list
sample: []
storage_encrypted:
description: Whether the storage is encrypted
returned: always
type: bool
sample: true
storage_type:
description: Storage type of the Database instance
returned: always
type: str
sample: gp2
tags:
description: Tags used by the database instance
returned: always
type: complex
contains: {}
vpc_security_groups:
description: List of VPC security groups
returned: always
type: complex
contains:
status:
description: Status of the VPC security group
returned: always
type: str
sample: active
vpc_security_group_id:
description: VPC Security Group ID
returned: always
type: str
sample: sg-abcd1234
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, AWSRetry, camel_dict_to_snake_dict
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
def instance_facts(module, conn):
instance_name = module.params.get('db_instance_identifier')
filters = module.params.get('filters')
params = dict()
if instance_name:
params['DBInstanceIdentifier'] = instance_name
if filters:
params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
paginator = conn.get_paginator('describe_db_instances')
try:
results = paginator.paginate(**params).build_full_result()['DBInstances']
except is_boto3_error_code('DBInstanceNotFound'):
results = []
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, "Couldn't get instance information")
for instance in results:
try:
instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
aws_retry=True)['TagList'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
def main():
argument_spec = dict(
db_instance_identifier=dict(aliases=['id']),
filters=dict(type='dict')
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
module.exit_json(**instance_facts(module, conn))
if __name__ == '__main__':
main()
|
Fedik/gramps | refs/heads/master | gramps/gui/widgets/fanchart2way.py | 5 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham, Martin Hawlisch
# Copyright (C) 2009 Douglas S. Blank
# Copyright (C) 2012 Benny Malengier
# Copyright (C) 2014 Bastien Jacquet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## Based on the paper:
## http://www.cs.utah.edu/~draperg/research/fanchart/draperg_FHT08.pdf
## and the applet:
## http://www.cs.utah.edu/~draperg/research/fanchart/demo/
## Found by redwood:
## http://www.gramps-project.org/bugs/view.php?id=2611
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import math
import cairo
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..utils import hex_to_rgb
from .fanchart import (FanChartWidget, PIXELS_PER_GENERATION, BORDER_EDGE_WIDTH)
from .fanchartdesc import (FanChartBaseWidget,
FanChartDescWidget,
FanChartGrampsGUI,
NORMAL, EXPANDED, COLLAPSED,
TRANSLATE_PX, CHILDRING_WIDTH,
BACKGROUND_GRAD_GEN,
BACKGROUND_GRAD_AGE,
BACKGROUND_GRAD_PERIOD,
FORM_CIRCLE, TYPE_BOX_NORMAL, TYPE_BOX_FAMILY)
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
PIXELS_PER_GENPERSON_RATIO = 0.55 # ratio of generation radius for person
# (rest for partner)
PIXELS_PER_GEN_SMALL = 80
PIXELS_PER_GEN_LARGE = 160
N_GEN_SMALL = 4
PIXELS_PER_GENFAMILY = 25 # size of radius for family
PIXELS_PER_RECLAIM = 4 # size of the radius of pixels taken from family
# to reclaim space
PIXELS_PARTNER_GAP = 0 # Padding between someone and his partner
PIXELS_CHILDREN_GAP = 5 # Padding between generations
PARENTRING_WIDTH = 12 # width of the parent ring inside the person
ANGLE_CHEQUI = 0 # Algorithm with homogeneous children distribution
ANGLE_WEIGHT = 1 # Algorithm for angle computation based on nr of descendants
TYPE_ASCENDANCE = 0
TYPE_DESCENDANCE = 1
#-------------------------------------------------------------------------
#
# FanChart2WayWidget
#
#-------------------------------------------------------------------------
class FanChart2WayWidget(FanChartWidget, FanChartDescWidget):
"""
Interactive Fan Chart Widget.
"""
CENTER = 50 # we require a larger center
def __init__(self, dbstate, uistate, callback_popup=None):
"""
Fan Chart Widget. Handles visualization of data in self.data.
See main() of FanChartGramplet for example of model format.
"""
self.gen2people = {}
self.gen2fam = {}
self.rootangle_rad_desc = [math.radians(275), math.radians(275 + 170)]
self.rootangle_rad_asc = [math.radians(90), math.radians(270)]
self.data = {}
self.set_values(None, 6, 5, True, True, BACKGROUND_GRAD_GEN, True,
'Sans', '#0000FF', '#FF0000', None, 0.5, ANGLE_WEIGHT,
'#888a85', False)
FanChartBaseWidget.__init__(self, dbstate, uistate, callback_popup)
def reset(self):
"""
Reset the fan chart. This should trigger computation of all data
structures needed
"""
self.cache_fontcolor = {}
# fill the data structure
self._fill_data_structures()
# prepare the colors for the boxes
self.prepare_background_box(self.generations_asc +
self.generations_desc - 1)
def set_values(self, root_person_handle, maxgen_asc, maxgen_desc,
flipupsidedownname, twolinename, background,
background_gradient, fontdescr, grad_start, grad_end,
filtr, alpha_filter, angle_algo, dupcolor, showid):
"""
Reset the values to be used:
:param root_person_handle: person to show
:param maxgen_asc: maximum of ascendant generations to show
:param maxgen_desc: maximum of descendant generations to show
        :param flipupsidedownname: flip the name on the left side of the
                                   fanchart so it is not displayed upside down
:param background: config setting of which background procedure to use
:type background: int
:param background_gradient: option to add an overall gradient
for distinguishing Asc/Desc
:param fontdescr: string describing the font to use
:param grad_start: colors to use for background procedure
:param grad_end: colors to use for background procedure
:param filtr: the person filter to apply to the people in the chart
:param alpha_filter: the alpha transparency value (0-1) to apply to
filtered out data
        :param angle_algo: algorithm to use to calculate the sizes of the boxes
        :param dupcolor: color to use for people or families that occur
                         more than once
:param showid: option to show the gramps id or not
"""
self.rootpersonh = root_person_handle
self.generations_asc = maxgen_asc
self.generations_desc = maxgen_desc
self.background = background
self.background_gradient = background_gradient
self.fontdescr = fontdescr
self.grad_start = grad_start
self.grad_end = grad_end
self.filter = filtr
self.form = FORM_CIRCLE
self.alpha_filter = alpha_filter
self.anglealgo = angle_algo
self.dupcolor = hex_to_rgb(dupcolor)
self.childring = False
self.flipupsidedownname = flipupsidedownname
self.twolinename = twolinename
self.showid = showid
def set_generations(self):
"""
Set the generations to max, and fill data structures with initial data.
"""
self.rootangle_rad_desc = [math.radians(275), math.radians(275 + 170)]
self.rootangle_rad_asc = [math.radians(90), math.radians(270)]
self.handle2desc = {}
self.famhandle2desc = {}
self.handle2fam = {}
self.gen2people = {}
self.gen2fam = {}
# no center person
self.gen2people[0] = [(None, False, 0, 2 * math.pi, 0, 0, [], NORMAL)]
self.gen2fam[0] = [] # no families
for i in range(1, self.generations_desc):
self.gen2fam[i] = []
self.gen2people[i] = []
# indication of more children
self.gen2people[self.generations_desc] = []
# Ascendance part
self.angle = {}
self.data = {}
for i in range(self.generations_asc):
# name, person, parents?, children?
self.data[i] = [(None,) * 4] * 2 ** i
self.angle[i] = []
angle = self.rootangle_rad_asc[0]
portion = 1 / (2 ** i) * (self.rootangle_rad_asc[1] -
self.rootangle_rad_asc[0])
for dummy_count in range(len(self.data[i])):
# start, stop, state
self.angle[i].append([angle, angle + portion, NORMAL])
angle += portion
def _fill_data_structures(self):
"""
Initialize the data structures
"""
self.set_generations()
if not self.rootpersonh:
return
person = self.dbstate.db.get_person_from_handle(self.rootpersonh)
if not person:
# nothing to do, just return
return
# Descendance part
# person, duplicate or not, start angle, slice size,
# text, parent pos in fam, nrfam, userdata, status
self.gen2people[0] = [[person, False, 0, 2 * math.pi, 0, 0, [], NORMAL]]
self.handle2desc[self.rootpersonh] = 0
# recursively fill in the datastructures:
nrdesc = self._rec_fill_data(0, person, 0, self.generations_desc)
self.handle2desc[person.handle] += nrdesc
self._compute_angles(*self.rootangle_rad_desc)
# Ascendance part
parents = self._have_parents(person)
child = self._have_children(person)
# Ascendance data structure is the person object, parents, child and
# list for userdata which we might fill in later.
self.data[0][0] = (person, parents, child, [])
for current in range(1, self.generations_asc):
parent = 0
# name, person, parents, children
for (pers, dummy_q, dummy_c, dummy_d) in self.data[current - 1]:
# Get father's and mother's details:
for person in [self._get_parent(pers, True),
self._get_parent(pers, False)]:
if current == self.generations_asc - 1:
parents = self._have_parents(person)
else:
parents = None
self.data[current][parent] = (person, parents, None, [])
if person is None:
# start,stop,male/right,state
self.angle[current][parent][2] = COLLAPSED
parent += 1
def nrgen_desc(self):
"""
compute the number of generations present
"""
for gen in range(self.generations_desc - 1, 0, -1):
if self.gen2people[gen]:
return gen + 1
return 1
def nrgen_asc(self):
"""
compute the number of generations present
"""
for generation in range(self.generations_asc - 1, 0, -1):
for idx in range(len(self.data[generation])):
(person, dummy_parents, dummy_child,
dummy_userdata) = self.data[generation][idx]
if person:
return generation
return 1
def maxradius_asc(self, generation):
"""
        Compute the current radius of the ascendant circle
"""
(dummy_radiusin,
radius_asc) = self.get_radiusinout_for_gen_asc(generation)
return radius_asc + BORDER_EDGE_WIDTH
def maxradius_desc(self, generation):
"""
Compute the current radius of the descendant circle
"""
(dummy_radiusin_pers, dummy_radiusout_pers, dummy_radiusin_partner,
radius_desc) = self.get_radiusinout_for_gen_pair(generation-1)
return radius_desc + BORDER_EDGE_WIDTH
def halfdist(self):
"""
        Compute the current maximum radius of the circle (half the diameter)
"""
return max(self.maxradius_desc(self.nrgen_desc()),
self.maxradius_asc(self.nrgen_asc()))
def get_radiusinout_for_gen_desc(self, generation):
"""
Get the in and out radius for descendant generation
(starting with center pers = 0)
"""
radius_first_gen = (self.CENTER -
(1 - PIXELS_PER_GENPERSON_RATIO) *
PIXELS_PER_GEN_SMALL)
if generation < N_GEN_SMALL:
radius_start = PIXELS_PER_GEN_SMALL * generation + radius_first_gen
return (radius_start, radius_start + PIXELS_PER_GEN_SMALL)
else:
radius_start = (PIXELS_PER_GEN_SMALL * N_GEN_SMALL +
PIXELS_PER_GEN_LARGE * (generation - N_GEN_SMALL) +
radius_first_gen)
return (radius_start, radius_start + PIXELS_PER_GEN_LARGE)
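    # Worked example (illustration only): with CENTER = 50,
    # PIXELS_PER_GENPERSON_RATIO = 0.55 and PIXELS_PER_GEN_SMALL = 80,
    # radius_first_gen = 50 - 0.45 * 80 = 14. Generation 0 then spans
    # (14, 94), generation 1 spans (94, 174), and the first "large"
    # generation (gen 4, since N_GEN_SMALL = 4) spans
    # (4 * 80 + 14, 4 * 80 + 14 + 160) = (334, 494).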
def get_radiusinout_for_gen_asc(self, generation):
"""
Get the in and out radius for ascendant generation
(starting with center pers = 0)
"""
dummy_radiusin, radius_first_gen = self.get_radiusinout_for_gen_desc(0)
outerradius = generation * PIXELS_PER_GENERATION + radius_first_gen
innerradius = ((generation - 1) * PIXELS_PER_GENERATION +
radius_first_gen)
if generation == 0:
innerradius = CHILDRING_WIDTH + TRANSLATE_PX
return (innerradius, outerradius)
def get_radiusinout_for_gen_pair(self, generation):
"""
Get the in and out radius for descendant generation pair
(starting with center pers = 0)
:return: (radiusin_pers, radiusout_pers,
radiusin_partner, radiusout_partner)
"""
radiusin, radiusout = self.get_radiusinout_for_gen_desc(generation)
radius_spread = (radiusout - radiusin -
PIXELS_CHILDREN_GAP - PIXELS_PARTNER_GAP)
radiusin_pers = radiusin + PIXELS_CHILDREN_GAP
radiusout_pers = (radiusin_pers +
PIXELS_PER_GENPERSON_RATIO * radius_spread)
radiusin_partner = radiusout_pers + PIXELS_PARTNER_GAP
radiusout_partner = radiusout
return (radiusin_pers, radiusout_pers,
radiusin_partner, radiusout_partner)
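    # Worked example (illustration only), continuing the numbers above for
    # generation 0: radiusin, radiusout = (14, 94), so
    # radius_spread = 94 - 14 - 5 - 0 = 75,
    # radiusin_pers = 19, radiusout_pers = 19 + 0.55 * 75 = 60.25,
    # radiusin_partner = 60.25 and radiusout_partner = 94: the person gets
    # 55% of the usable ring and the partner gets the rest.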
def people_generator(self):
"""
a generator over all people outside of the core person
"""
for generation in range(self.generations_desc):
for data in self.gen2people[generation]:
yield (data[0], data[6])
for generation in range(self.generations_desc):
for data in self.gen2fam[generation]:
yield (data[7], data[6])
for generation in range(self.generations_asc):
for idx in range(len(self.data[generation])):
(person, dummy_parents, dummy_child,
userdata) = self.data[generation][idx]
yield (person, userdata)
def innerpeople_generator(self):
"""
a generator over all people inside of the core person
"""
        # intentional empty generator: nothing is drawn inside the
        # center person in the two-way chart
        if False:
            yield
def draw_background(self, ctx):
"""
Draw the background
"""
ctx.save()
ctx.rotate(math.radians(self.rotate_value))
delta = (self.rootangle_rad_asc[0] -
self.rootangle_rad_desc[1]) / 2.0 % math.pi
ctx.move_to(0, 0)
radius_gradient_asc = 1.5 * self.maxradius_asc(self.generations_asc)
gradient_asc = cairo.RadialGradient(0, 0, self.CENTER,
0, 0, radius_gradient_asc)
color = hex_to_rgb(self.grad_end)
gradient_asc.add_color_stop_rgba(0.0, color[0] / 255, color[1] / 255,
color[2] / 255, 0.5)
gradient_asc.add_color_stop_rgba(1.0, 1, 1, 1, 0.0)
start_rad, stop_rad = (self.rootangle_rad_asc[0] - delta,
self.rootangle_rad_asc[1] + delta)
ctx.set_source(gradient_asc)
ctx.arc(0, 0, radius_gradient_asc, start_rad, stop_rad)
ctx.fill()
ctx.move_to(0, 0)
radius_gradient_desc = 1.5 * self.maxradius_desc(self.generations_desc)
gradient_desc = cairo.RadialGradient(0, 0, self.CENTER,
0, 0, radius_gradient_desc)
color = hex_to_rgb(self.grad_start)
gradient_desc.add_color_stop_rgba(0.0, color[0] / 255, color[1] / 255,
color[2] / 255, 0.5)
gradient_desc.add_color_stop_rgba(1.0, 1, 1, 1, 0.0)
start_rad, stop_rad = (self.rootangle_rad_desc[0] - delta,
self.rootangle_rad_desc[1] + delta)
ctx.set_source(gradient_desc)
ctx.arc(0, 0, radius_gradient_desc, start_rad, stop_rad)
ctx.fill()
ctx.restore()
def draw(self, ctx=None, scale=1.0):
"""
The main method to do the drawing.
If ctx is given, we assume we draw draw raw on the cairo context ctx
To draw in GTK3 and use the allocation, set ctx=None.
Note: when drawing for display, to counter a Gtk issue with scrolling
or resizing the drawing window, we draw on a surface, then copy to the
drawing context when the Gtk 'draw' signal arrives.
"""
# first do size request of what we will need
halfdist = self.halfdist()
if not ctx: # Display
size_w = size_h = 2 * halfdist
size_w_a = self.get_allocated_width()
size_h_a = self.get_allocated_height()
self.set_size_request(max(size_w, size_w_a), max(size_h, size_h_a))
size_w = self.get_allocated_width()
size_h = self.get_allocated_height()
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
size_w, size_h)
ctx = cairo.Context(self.surface)
self.center_xy = self.center_xy_from_delta()
ctx.translate(*self.center_xy)
else: # printing
self.center_xy = halfdist, halfdist
ctx.scale(scale, scale)
ctx.translate(halfdist, halfdist)
ctx.save()
# Draw background
if self.background_gradient:
self.draw_background(ctx)
# Draw center person:
(person, dup, start, portion, dummy_parentfampos, dummy_nrfam,
userdata, status) = self.gen2people[0][0]
if not person:
return
gen_remapped = self.generations_desc - 1 # remapped generation
if gen_remapped == 0:
# remapped generation
gen_remapped = (self.generations_desc + self.generations_asc - 1)
radiusin_pers, radiusout_pers, radiusin_partner, radiusout_partner = \
self.get_radiusinout_for_gen_pair(0)
radiusin = TRANSLATE_PX
radiusout = radiusout_pers
self.draw_person(ctx, person, radiusin, radiusout,
math.pi / 2, math.pi / 2 + 2 * math.pi, gen_remapped,
False, userdata, is_central_person=True)
# draw center to move chart
ctx.set_source_rgb(0, 0, 0) # black
ctx.move_to(TRANSLATE_PX, 0)
ctx.arc(0, 0, TRANSLATE_PX, 0, 2 * math.pi)
ctx.fill()
ctx.rotate(math.radians(self.rotate_value))
# Ascendance
for generation in range(self.generations_asc - 1, 0, -1):
for idx in range(len(self.data[generation])):
(person, parents, dummy_child,
userdata) = self.data[generation][idx]
if person:
start, stop, state = self.angle[generation][idx]
if state in [NORMAL, EXPANDED]:
fct = self.get_radiusinout_for_gen_asc
radiusin, radiusout = fct(generation)
dup = False
# remapped generation
gen_remapped = generation + self.generations_desc - 1
indicator = (generation == self.generations_asc - 1
and parents)
self.draw_person(ctx, person, radiusin, radiusout,
start, stop, gen_remapped, dup,
userdata, thick=(state == EXPANDED),
has_moregen_indicator=indicator)
# Descendance
for gen in range(self.generations_desc):
(radiusin_pers, radiusout_pers, radiusin_partner,
radiusout_partner) = self.get_radiusinout_for_gen_pair(gen)
gen_remapped = (self.generations_desc - gen - 1)
if gen_remapped == 0:
# remapped generation
gen_remapped = (self.generations_desc +
self.generations_asc - 1)
if gen > 0:
for pdata in self.gen2people[gen]:
# person, duplicate or not, start angle, slice size,
# parent pos in fam, nrfam, userdata, status
(pers, dup, start, portion, dummy_pospar, dummy_nrfam,
userdata, status) = pdata
if status != COLLAPSED:
self.draw_person(ctx, pers, radiusin_pers,
radiusout_pers,
start, start + portion,
gen_remapped, dup, userdata,
thick=status != NORMAL)
#if gen < self.generations_desc - 1:
for famdata in self.gen2fam[gen]:
# family, duplicate or not, start angle, slice size,
# spouse pos in gen, nrchildren, userdata, status
(fam, dup, start, portion, dummy_posfam, dummy_nrchild,
userdata, partner, status) = famdata
if status != COLLAPSED:
more_pers_flag = (gen == self.generations_desc - 1
and fam.get_child_ref_list())
self.draw_person(ctx, partner,
radiusin_partner, radiusout_partner,
start, start + portion,
gen_remapped, dup, userdata,
thick=(status != NORMAL),
has_moregen_indicator=more_pers_flag)
ctx.restore()
if self.background in [BACKGROUND_GRAD_AGE, BACKGROUND_GRAD_PERIOD]:
self.draw_gradient_legend(ctx, halfdist)
def cell_address_under_cursor(self, curx, cury):
"""
Determine the cell address in the fan under the cursor
position x and y.
None if outside of diagram
"""
radius, rads, dummy_raw_rads = self.cursor_to_polar(curx, cury,
get_raw_rads=True)
if radius < TRANSLATE_PX:
return None
radius_parents = self.get_radiusinout_for_gen_asc(0)[1]
if ((radius < radius_parents) or
self.radian_in_bounds(self.rootangle_rad_desc[0], rads,
self.rootangle_rad_desc[1])):
cell_address = self.cell_address_under_cursor_desc(rads, radius)
if cell_address is not None:
return (TYPE_DESCENDANCE,) + cell_address
elif self.radian_in_bounds(self.rootangle_rad_asc[0], rads,
self.rootangle_rad_asc[1]):
cell_address = self.cell_address_under_cursor_asc(rads, radius)
if cell_address and cell_address[0] == 0:
return None # There is a gap before first parents
if cell_address is not None:
return (TYPE_ASCENDANCE,) + cell_address
return None
def cell_address_under_cursor_desc(self, rads, radius):
"""
Determine the cell address in the fan under the cursor
position x and y.
None if outside of diagram
"""
generation, selected, btype = None, None, TYPE_BOX_NORMAL
for gen in range(self.generations_desc):
(radiusin_pers, radiusout_pers, radiusin_partner,
radiusout_partner) = self.get_radiusinout_for_gen_pair(gen)
if radiusin_pers <= radius <= radiusout_pers:
generation, btype = gen, TYPE_BOX_NORMAL
break
if radiusin_partner <= radius <= radiusout_partner:
generation, btype = gen, TYPE_BOX_FAMILY
break
# find what person is in this position:
        if generation is not None and generation > 0:
            selected = FanChartDescWidget.personpos_at_angle(self, generation,
                                                             rads, btype)
        if generation is None or selected is None:
return None
return generation, selected, btype
def cell_address_under_cursor_asc(self, rads, radius):
"""
Determine the cell address in the fan under the cursor
position x and y.
None if outside of diagram
"""
generation, selected = None, None
for gen in range(self.generations_asc):
radiusin, radiusout = self.get_radiusinout_for_gen_asc(gen)
if radiusin <= radius <= radiusout:
generation = gen
break
# find what person is in this position:
        if generation is not None and generation > 0:
            selected = FanChartWidget.personpos_at_angle(self, generation, rads)
        if generation is None or selected is None:
return None
return generation, selected
def person_at(self, cell_address):
"""
        returns the person at cell_address
"""
direction = cell_address[0]
if direction == TYPE_ASCENDANCE:
return FanChartWidget.person_at(self, cell_address[1:])
elif direction == TYPE_DESCENDANCE:
return FanChartDescWidget.person_at(self, cell_address[1:])
return None
def family_at(self, cell_address):
"""
returns the family at cell_address
"""
direction = cell_address[0]
if direction == TYPE_ASCENDANCE:
return None
elif direction == TYPE_DESCENDANCE:
return FanChartDescWidget.family_at(self, cell_address[1:])
return None
def do_mouse_click(self):
        # no drag occurred, expand or collapse the section
self.toggle_cell_state(self._mouse_click_cell_address)
self._mouse_click = False
self.draw()
self.queue_draw()
def expand_parents(self, generation, selected, current):
if generation >= self.generations_asc:
return
selected = 2 * selected
start, stop, state = self.angle[generation][selected]
if state in [NORMAL, EXPANDED]:
portion = (stop - start) * 2.0
self.angle[generation][selected] = [current, current + portion,
state]
self.expand_parents(generation + 1, selected, current)
current += portion
start, stop, state = self.angle[generation][selected + 1]
if state in [NORMAL, EXPANDED]:
portion = (stop - start) * 2.0
self.angle[generation][selected + 1] = [current, current + portion,
state]
self.expand_parents(generation + 1, selected + 1, current)
def show_parents(self, generation, selected, angle, portion):
if generation >= self.generations_asc:
return
selected *= 2
self.angle[generation][selected][0] = angle
self.angle[generation][selected][1] = angle + portion
self.angle[generation][selected][2] = NORMAL
self.show_parents(generation + 1, selected, angle, portion / 2.0)
self.angle[generation][selected + 1][0] = angle + portion
self.angle[generation][selected + 1][1] = angle + portion + portion
self.angle[generation][selected + 1][2] = NORMAL
self.show_parents(generation + 1, selected + 1,
angle + portion, portion / 2.0)
def hide_parents(self, generation, selected, angle):
if generation >= self.generations_asc:
return
selected = 2 * selected
self.angle[generation][selected][0] = angle
self.angle[generation][selected][1] = angle
self.angle[generation][selected][2] = COLLAPSED
self.hide_parents(generation + 1, selected, angle)
self.angle[generation][selected + 1][0] = angle
self.angle[generation][selected + 1][1] = angle
self.angle[generation][selected + 1][2] = COLLAPSED
self.hide_parents(generation + 1, selected + 1, angle)
def shrink_parents(self, generation, selected, current):
if generation >= self.generations_asc:
return
selected = 2 * selected
start, stop, state = self.angle[generation][selected]
if state in [NORMAL, EXPANDED]:
portion = (stop - start) / 2.0
self.angle[generation][selected] = [current, current + portion,
state]
self.shrink_parents(generation + 1, selected, current)
current += portion
start, stop, state = self.angle[generation][selected + 1]
if state in [NORMAL, EXPANDED]:
portion = (stop - start) / 2.0
self.angle[generation][selected + 1] = [current, current + portion,
state]
self.shrink_parents(generation + 1, selected + 1, current)
def toggle_cell_state(self, cell_address):
direction = cell_address[0]
if direction == TYPE_ASCENDANCE:
FanChartWidget.toggle_cell_state(self, cell_address[1:])
elif direction == TYPE_DESCENDANCE:
FanChartDescWidget.toggle_cell_state(self, cell_address[1:])
self._compute_angles(*self.rootangle_rad_desc)
class FanChart2WayGrampsGUI(FanChartGrampsGUI):
""" class for functions fanchart GUI elements will need in Gramps
"""
def main(self):
"""
Fill the data structures with the active data. This initializes all
data.
"""
root_person_handle = self.get_active('Person')
self.fan.set_values(root_person_handle, self.generations_asc,
self.generations_desc, self.flipupsidedownname,
self.twolinename, self.background,
self.background_gradient, self.fonttype,
self.grad_start, self.grad_end, self.generic_filter,
self.alpha_filter, self.angle_algo, self.dupcolor,
self.showid)
self.fan.reset()
self.fan.draw()
self.fan.queue_draw()
|
zjh3123629/qt210-linux | refs/heads/master | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
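# Invocation sketch (the record step is an assumption about the workflow;
# the script itself runs through `perf script`, as the usage string says):
#
#   perf record -e raw_syscalls:sys_enter -a sleep 10
#   perf script -s sctop.py bash 5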
|
shayan72/Courseware | refs/heads/master | Courseware/urls.py | 1 | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'Courseware.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^courses/', include('course.urls')),
url(r'^manage/', include('course_admin.urls')),
url(r'^user/', include('account.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
SaptakS/pune.pycon.org | refs/heads/master | pycon/sponsorship/tests/test_models.py | 5 | import datetime
from django.conf import settings
from django.core.exceptions import ValidationError
from django.test import TestCase
from symposion.conference.models import current_conference, Conference
from ..models import Benefit, Sponsor, SponsorBenefit, SponsorLevel
from .factories import SponsorFactory
class TestApprovalTimestamp(TestCase):
def test_initial_active_true(self):
"""
New Sponsor should have _initial_active and approval_time set
correctly.
"""
sponsor = SponsorFactory(active=True, approval_time=None)
self.assertTrue(sponsor._initial_active)
self.assertIsNotNone(sponsor.approval_time)
def test_initial_active_false(self):
"""
New Sponsor should have _initial_active and approval_time set
correctly.
"""
sponsor = SponsorFactory(active=False, approval_time=None)
self.assertFalse(sponsor._initial_active)
self.assertIsNone(sponsor.approval_time)
def test_set_approval(self):
"""
Saved Sponsor should have _initial_active and approval_time set
correctly.
"""
sponsor = SponsorFactory(active=False)
sponsor.active = True
sponsor.save()
self.assertIsNotNone(sponsor.approval_time)
self.assertTrue(sponsor._initial_active)
def test_unset_approval(self):
"""
Saved Sponsor should have _initial_active and approval_time set
correctly.
"""
sponsor = SponsorFactory(active=True)
sponsor.active = False
sponsor.save()
self.assertIsNone(sponsor.approval_time)
self.assertFalse(sponsor._initial_active)
class TestBenefitValidation(TestCase):
"""
It should not be possible to save a SponsorBenefit if it has the
wrong kind of data in it - e.g. a text-type benefit cannot have
an uploaded file, and vice-versa.
"""
def setUp(self):
# we need a sponsor
Conference.objects.get_or_create(pk=settings.CONFERENCE_ID)
conference = current_conference()
self.sponsor_level = SponsorLevel.objects.create(
conference=conference, name="Lead", cost=1)
self.sponsor = Sponsor.objects.create(
name="Big Daddy",
level=self.sponsor_level,
)
# Create our benefit types
self.text_type = Benefit.objects.create(name="text", type="text")
self.file_type = Benefit.objects.create(name="file", type="file")
self.weblogo_type = Benefit.objects.create(name="log", type="weblogo")
self.simple_type = Benefit.objects.create(name="simple", type="simple")
def validate(self, should_work, benefit_type, upload, text):
obj = SponsorBenefit(
benefit=benefit_type,
sponsor=self.sponsor,
upload=upload,
text=text
)
if should_work:
obj.save()
else:
with self.assertRaises(ValidationError):
obj.save()
def test_text_has_text(self):
self.validate(True, self.text_type, upload=None, text="Some text")
def test_text_has_upload(self):
self.validate(False, self.text_type, upload="filename", text='')
def test_text_has_both(self):
self.validate(False, self.text_type, upload="filename", text="Text")
def test_file_has_text(self):
self.validate(False, self.file_type, upload=None, text="Some text")
def test_file_has_upload(self):
self.validate(True, self.file_type, upload="filename", text='')
def test_file_has_both(self):
self.validate(False, self.file_type, upload="filename", text="Text")
def test_weblogo_has_text(self):
self.validate(False, self.weblogo_type, upload=None, text="Some text")
def test_weblogo_has_upload(self):
self.validate(True, self.weblogo_type, upload="filename", text='')
def test_weblogo_has_both(self):
self.validate(False, self.weblogo_type, upload="filename", text="Text")
def test_simple_has_neither(self):
self.validate(True, self.simple_type, upload=None, text='')
def test_simple_has_text(self):
self.validate(True, self.simple_type, upload=None, text="Some text")
def test_simple_has_upload(self):
self.validate(False, self.simple_type, upload="filename", text='')
def test_simple_has_both(self):
self.validate(False, self.simple_type, upload="filename", text="Text")
|
unusedPhD/amoco | refs/heads/release | amoco/system/msp430.py | 6 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2014 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
from amoco.system.core import *
from amoco.arch.msp430 import cpu
#----------------------------------------------------------------------------
class MSP430(CoreExec):
def __init__(self,p):
CoreExec.__init__(self,p,cpu)
# load the program into virtual memory (populate the mmap dict)
def load_binary(self):
# use 32K RAM
self.mmap.write(0x0200,'\0'*0x8000)
self.mmap.write(0x4400,self.bin)
def initenv(self):
from amoco.cas.mapper import mapper
m = mapper()
for r in self.cpu.R: m[r] = self.cpu.cst(0,16)
m[self.cpu.pc] = self.cpu.cst(0x4400,16)
return m
    # optional codehelper method allows platform-specific analysis of
    # either a (raw) list of instructions or a block/func object (see
    # amoco.code); the default helper is a no-op:
def codehelper(self,seq=None,block=None,func=None):
if seq is not None: return seq
if block is not None: return block
if func is not None: return func
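# A minimal usage sketch (illustrative only; the exact program-object
# constructor varies between amoco releases, so this shows the intended
# call pattern rather than a guaranteed API):
#
#   from amoco.system.core import DataIO
#   p = DataIO(open('firmware.bin', 'rb'))
#   vm = MSP430(p)
#   vm.load_binary()    # zeroed RAM at 0x0200, image mapped at 0x4400
#   env = vm.initenv()  # fresh mapper with pc = cst(0x4400, 16)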
|
edx/django-rest-framework | refs/heads/master | tests/test_authentication.py | 3 | # coding: utf-8
from __future__ import unicode_literals
import base64
import pytest
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.db import models
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.utils import six
from rest_framework import (
HTTP_HEADER_ENCODING, exceptions, permissions, renderers, status
)
from rest_framework.authentication import (
BaseAuthentication, BasicAuthentication, SessionAuthentication,
TokenAuthentication
)
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework.compat import is_authenticated
from rest_framework.response import Response
from rest_framework.test import APIClient, APIRequestFactory
from rest_framework.views import APIView
factory = APIRequestFactory()
class CustomToken(models.Model):
key = models.CharField(max_length=40, primary_key=True)
user = models.OneToOneField(User, on_delete=models.CASCADE)
class CustomTokenAuthentication(TokenAuthentication):
model = CustomToken
class CustomKeywordTokenAuthentication(TokenAuthentication):
keyword = 'Bearer'
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def post(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def put(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
urlpatterns = [
url(
r'^session/$',
MockView.as_view(authentication_classes=[SessionAuthentication])
),
url(
r'^basic/$',
MockView.as_view(authentication_classes=[BasicAuthentication])
),
url(
r'^token/$',
MockView.as_view(authentication_classes=[TokenAuthentication])
),
url(
r'^customtoken/$',
MockView.as_view(authentication_classes=[CustomTokenAuthentication])
),
url(
r'^customkeywordtoken/$',
MockView.as_view(
authentication_classes=[CustomKeywordTokenAuthentication]
)
),
url(r'^auth-token/$', obtain_auth_token),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
]
@override_settings(ROOT_URLCONF='tests.test_authentication')
class BasicAuthTests(TestCase):
"""Basic authentication"""
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(
self.username, self.email, self.password
)
def test_post_form_passing_basic_auth(self):
"""Ensure POSTing json over basic auth with correct credentials passes and does not require CSRF"""
credentials = ('%s:%s' % (self.username, self.password))
base64_credentials = base64.b64encode(
credentials.encode(HTTP_HEADER_ENCODING)
).decode(HTTP_HEADER_ENCODING)
auth = 'Basic %s' % base64_credentials
response = self.csrf_client.post(
'/basic/',
{'example': 'example'},
HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_200_OK
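    # The header assembled above has the standard HTTP Basic shape,
    # 'Basic ' + base64('<username>:<password>'), and reaches the view
    # through the WSGI-style HTTP_AUTHORIZATION keyword.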
def test_post_json_passing_basic_auth(self):
"""Ensure POSTing form over basic auth with correct credentials passes and does not require CSRF"""
credentials = ('%s:%s' % (self.username, self.password))
base64_credentials = base64.b64encode(
credentials.encode(HTTP_HEADER_ENCODING)
).decode(HTTP_HEADER_ENCODING)
auth = 'Basic %s' % base64_credentials
response = self.csrf_client.post(
'/basic/',
{'example': 'example'},
format='json',
HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_200_OK
def test_regression_handle_bad_base64_basic_auth_header(self):
"""Ensure POSTing JSON over basic auth with incorrectly padded Base64 string is handled correctly"""
# regression test for issue in 'rest_framework.authentication.BasicAuthentication.authenticate'
# https://github.com/encode/django-rest-framework/issues/4089
auth = 'Basic =a='
response = self.csrf_client.post(
'/basic/',
{'example': 'example'},
format='json',
HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_post_form_failing_basic_auth(self):
"""Ensure POSTing form over basic auth without correct credentials fails"""
response = self.csrf_client.post('/basic/', {'example': 'example'})
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_post_json_failing_basic_auth(self):
"""Ensure POSTing json over basic auth without correct credentials fails"""
response = self.csrf_client.post(
'/basic/',
{'example': 'example'},
format='json'
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
assert response['WWW-Authenticate'] == 'Basic realm="api"'
def test_fail_post_if_credentials_are_missing(self):
response = self.csrf_client.post(
'/basic/', {'example': 'example'}, HTTP_AUTHORIZATION='Basic ')
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_fail_post_if_credentials_contain_spaces(self):
response = self.csrf_client.post(
'/basic/', {'example': 'example'},
HTTP_AUTHORIZATION='Basic foo bar'
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
@override_settings(ROOT_URLCONF='tests.test_authentication')
class SessionAuthTests(TestCase):
"""User session authentication"""
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.non_csrf_client = APIClient(enforce_csrf_checks=False)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(
self.username, self.email, self.password
)
def tearDown(self):
self.csrf_client.logout()
def test_login_view_renders_on_get(self):
"""
Ensure the login template renders for a basic GET.
cf. [#1810](https://github.com/encode/django-rest-framework/pull/1810)
"""
response = self.csrf_client.get('/auth/login/')
content = response.content.decode('utf8')
assert '<label for="id_username">Username:</label>' in content
def test_post_form_session_auth_failing_csrf(self):
"""
Ensure POSTing form over session authentication without CSRF token fails.
"""
self.csrf_client.login(username=self.username, password=self.password)
response = self.csrf_client.post('/session/', {'example': 'example'})
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_post_form_session_auth_passing(self):
"""
Ensure POSTing form over session authentication with logged in
user and CSRF token passes.
"""
self.non_csrf_client.login(
username=self.username, password=self.password
)
response = self.non_csrf_client.post(
'/session/', {'example': 'example'}
)
assert response.status_code == status.HTTP_200_OK
def test_put_form_session_auth_passing(self):
"""
Ensure PUTting form over session authentication with
logged in user and CSRF token passes.
"""
self.non_csrf_client.login(
username=self.username, password=self.password
)
response = self.non_csrf_client.put(
'/session/', {'example': 'example'}
)
assert response.status_code == status.HTTP_200_OK
def test_post_form_session_auth_failing(self):
"""
Ensure POSTing form over session authentication without logged in user fails.
"""
response = self.csrf_client.post('/session/', {'example': 'example'})
assert response.status_code == status.HTTP_403_FORBIDDEN
class BaseTokenAuthTests(object):
"""Token authentication"""
model = None
path = None
header_prefix = 'Token '
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(
self.username, self.email, self.password
)
self.key = 'abcd1234'
self.token = self.model.objects.create(key=self.key, user=self.user)
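    # With the defaults above, requests authenticate via the header
    # 'Authorization: Token abcd1234'; subclasses swap the prefix
    # (e.g. 'Bearer ' in CustomKeywordTokenAuthTests below).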
def test_post_form_passing_token_auth(self):
"""
        Ensure POSTing form over token auth with correct
credentials passes and does not require CSRF
"""
auth = self.header_prefix + self.key
response = self.csrf_client.post(
self.path, {'example': 'example'}, HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_200_OK
def test_fail_authentication_if_user_is_not_active(self):
user = User.objects.create_user('foo', 'bar', 'baz')
user.is_active = False
user.save()
self.model.objects.create(key='foobar_token', user=user)
response = self.csrf_client.post(
self.path, {'example': 'example'},
HTTP_AUTHORIZATION=self.header_prefix + 'foobar_token'
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_fail_post_form_passing_nonexistent_token_auth(self):
# use a nonexistent token key
auth = self.header_prefix + 'wxyz6789'
response = self.csrf_client.post(
self.path, {'example': 'example'}, HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_fail_post_if_token_is_missing(self):
response = self.csrf_client.post(
self.path, {'example': 'example'},
HTTP_AUTHORIZATION=self.header_prefix)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_fail_post_if_token_contains_spaces(self):
response = self.csrf_client.post(
self.path, {'example': 'example'},
HTTP_AUTHORIZATION=self.header_prefix + 'foo bar'
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_fail_post_form_passing_invalid_token_auth(self):
# add an 'invalid' unicode character
auth = self.header_prefix + self.key + "¸"
response = self.csrf_client.post(
self.path, {'example': 'example'}, HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_post_json_passing_token_auth(self):
"""
        Ensure POSTing json over token auth with correct
credentials passes and does not require CSRF
"""
auth = self.header_prefix + self.key
response = self.csrf_client.post(
self.path, {'example': 'example'},
format='json', HTTP_AUTHORIZATION=auth
)
assert response.status_code == status.HTTP_200_OK
def test_post_json_makes_one_db_query(self):
"""
Ensure that authenticating a user using a
token performs only one DB query
"""
auth = self.header_prefix + self.key
def func_to_test():
return self.csrf_client.post(
self.path, {'example': 'example'},
format='json', HTTP_AUTHORIZATION=auth
)
self.assertNumQueries(1, func_to_test)
def test_post_form_failing_token_auth(self):
"""
Ensure POSTing form over token auth without correct credentials fails
"""
response = self.csrf_client.post(self.path, {'example': 'example'})
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_post_json_failing_token_auth(self):
"""
Ensure POSTing json over token auth without correct credentials fails
"""
response = self.csrf_client.post(
self.path, {'example': 'example'}, format='json'
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
@override_settings(ROOT_URLCONF='tests.test_authentication')
class TokenAuthTests(BaseTokenAuthTests, TestCase):
model = Token
path = '/token/'
def test_token_has_auto_assigned_key_if_none_provided(self):
"""Ensure creating a token with no key will auto-assign a key"""
self.token.delete()
token = self.model.objects.create(user=self.user)
assert bool(token.key)
def test_generate_key_returns_string(self):
"""Ensure generate_key returns a string"""
token = self.model()
key = token.generate_key()
assert isinstance(key, six.string_types)
def test_token_login_json(self):
"""Ensure token login view using JSON POST works."""
client = APIClient(enforce_csrf_checks=True)
response = client.post(
'/auth-token/',
{'username': self.username, 'password': self.password},
format='json'
)
assert response.status_code == status.HTTP_200_OK
assert response.data['token'] == self.key
def test_token_login_json_bad_creds(self):
"""
Ensure token login view using JSON POST fails if
bad credentials are used
"""
client = APIClient(enforce_csrf_checks=True)
response = client.post(
'/auth-token/',
{'username': self.username, 'password': "badpass"},
format='json'
)
assert response.status_code == 400
def test_token_login_json_missing_fields(self):
"""Ensure token login view using JSON POST fails if missing fields."""
client = APIClient(enforce_csrf_checks=True)
response = client.post('/auth-token/',
{'username': self.username}, format='json')
assert response.status_code == 400
def test_token_login_form(self):
"""Ensure token login view using form POST works."""
client = APIClient(enforce_csrf_checks=True)
response = client.post(
'/auth-token/',
{'username': self.username, 'password': self.password}
)
assert response.status_code == status.HTTP_200_OK
assert response.data['token'] == self.key
@override_settings(ROOT_URLCONF='tests.test_authentication')
class CustomTokenAuthTests(BaseTokenAuthTests, TestCase):
model = CustomToken
path = '/customtoken/'
@override_settings(ROOT_URLCONF='tests.test_authentication')
class CustomKeywordTokenAuthTests(BaseTokenAuthTests, TestCase):
model = Token
path = '/customkeywordtoken/'
header_prefix = 'Bearer '
class IncorrectCredentialsTests(TestCase):
def test_incorrect_credentials(self):
"""
If a request contains bad authentication credentials, then
authentication should run and error, even if no permissions
are set on the view.
"""
class IncorrectCredentialsAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('Bad credentials')
request = factory.get('/')
view = MockView.as_view(
authentication_classes=(IncorrectCredentialsAuth,),
permission_classes=()
)
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.data == {'detail': 'Bad credentials'}
class FailingAuthAccessedInRenderer(TestCase):
def setUp(self):
class AuthAccessingRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'txt'
def render(self, data, media_type=None, renderer_context=None):
request = renderer_context['request']
if is_authenticated(request.user):
return b'authenticated'
return b'not authenticated'
class FailingAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('authentication failed')
class ExampleView(APIView):
authentication_classes = (FailingAuth,)
renderer_classes = (AuthAccessingRenderer,)
def get(self, request):
return Response({'foo': 'bar'})
self.view = ExampleView.as_view()
def test_failing_auth_accessed_in_renderer(self):
"""
When authentication fails the renderer should still be able to access
`request.user` without raising an exception. Particularly relevant
to HTML responses that might reasonably access `request.user`.
"""
request = factory.get('/')
response = self.view(request)
content = response.render().content
assert content == b'not authenticated'
class NoAuthenticationClassesTests(TestCase):
def test_permission_message_with_no_authentication_classes(self):
"""
        An unauthenticated request made against a view that has no
        `authentication_classes` but does have `permission_classes` should
        return a 403 error code with the permission's message.
"""
class DummyPermission(permissions.BasePermission):
message = 'Dummy permission message'
def has_permission(self, request, view):
return False
request = factory.get('/')
view = MockView.as_view(
authentication_classes=(),
permission_classes=(DummyPermission,),
)
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.data == {'detail': 'Dummy permission message'}
class BasicAuthenticationUnitTests(TestCase):
def test_base_authentication_abstract_method(self):
with pytest.raises(NotImplementedError):
BaseAuthentication().authenticate({})
def test_basic_authentication_raises_error_if_user_not_found(self):
auth = BasicAuthentication()
with pytest.raises(exceptions.AuthenticationFailed):
auth.authenticate_credentials('invalid id', 'invalid password')
def test_basic_authentication_raises_error_if_user_not_active(self):
from rest_framework import authentication
class MockUser(object):
is_active = False
old_authenticate = authentication.authenticate
authentication.authenticate = lambda **kwargs: MockUser()
auth = authentication.BasicAuthentication()
with pytest.raises(exceptions.AuthenticationFailed) as error:
auth.authenticate_credentials('foo', 'bar')
assert 'User inactive or deleted.' in str(error)
authentication.authenticate = old_authenticate
|
smmribeiro/intellij-community | refs/heads/master | python/testData/optimizeImports/multilineImportElementsInCombinedFromImports.after.py | 30 | from collections import deque as d, OrderedDict as od, namedtuple # comment 1; comment 2
print(d, od, namedtuple)
|
ESOedX/edx-platform | refs/heads/master | lms/djangoapps/verify_student/migrations/0007_idverificationaggregate.py | 2 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-12 15:22
from __future__ import absolute_import, unicode_literals
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('verify_student', '0006_ssoverification'),
]
operations = [
migrations.CreateModel(
name='IDVerificationAggregate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', model_utils.fields.StatusField(choices=[(b'created', b'created'), (b'ready', b'ready'), (b'submitted', b'submitted'), (b'must_retry', b'must_retry'), (b'approved', b'approved'), (b'denied', b'denied')], default=b'created', max_length=100, no_check_for_status=True, verbose_name='status')),
('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status', verbose_name='status changed')),
('name', models.CharField(blank=True, max_length=255)),
('object_id', models.PositiveIntegerField()),
('created_at', models.DateTimeField(db_index=True)),
('updated_at', models.DateTimeField(db_index=True)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at'],
},
),
]
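    # The content_type/object_id columns above are the storage half of a
    # generic foreign key, so one aggregate row can reference any concrete
    # verification record (the GenericForeignKey itself is declared on the
    # model, not in this migration).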
|
JCROM-Android/jcrom_external_chromium_org | refs/heads/kitkat | media/tools/constrained_network_server/traffic_control_test.py | 187 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End-to-end tests for traffic control library."""
import os
import re
import sys
import unittest
import traffic_control
class TrafficControlTests(unittest.TestCase):
"""System tests for traffic_control functions.
These tests require root access.
"""
# A dummy interface name to use instead of real interface.
_INTERFACE = 'myeth'
def setUp(self):
"""Setup a dummy interface."""
# If we update to python version 2.7 or newer we can use setUpClass() or
# unittest.skipIf().
if os.getuid() != 0:
sys.exit('You need root access to run these tests.')
command = ['ip', 'link', 'add', 'name', self._INTERFACE, 'type', 'dummy']
traffic_control._Exec(command, 'Error creating dummy interface %s.' %
self._INTERFACE)
def tearDown(self):
"""Teardown the dummy interface and any network constraints on it."""
# Deleting the dummy interface deletes all associated constraints.
command = ['ip', 'link', 'del', self._INTERFACE]
traffic_control._Exec(command)
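  # The pair above is equivalent to running, as root:
  #   ip link add name myeth type dummy   (setUp)
  #   ip link del myeth                   (tearDown)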
def testExecOutput(self):
output = traffic_control._Exec(['echo', ' Test '])
self.assertEqual(output, 'Test')
def testExecException(self):
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._Exec, command=['ls', '!doesntExist!'])
def testExecErrorCustomMsg(self):
try:
traffic_control._Exec(['ls', '!doesntExist!'], msg='test_msg')
self.fail('No exception raised for invalid command.')
except traffic_control.TrafficControlError as e:
self.assertEqual(e.msg, 'test_msg')
def testAddRootQdisc(self):
"""Checks adding a root qdisc is successful."""
config = {'interface': self._INTERFACE}
root_detail = 'qdisc htb 1: root'
# Assert no htb root at startup.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(root_detail in output)
traffic_control._AddRootQdisc(config['interface'])
output = traffic_control._Exec(command)
# Assert htb root is added.
self.assertTrue(root_detail in output)
def testConfigureClassAdd(self):
"""Checks adding and deleting a class to the root qdisc."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000
}
class_detail = ('class htb 1:%x root prio 0 rate %dKbit ceil %dKbit' %
(config['port'], config['bandwidth'], config['bandwidth']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Assert class does not exist prior to adding it.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert class is added.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertTrue(class_detail in output)
# Delete class.
traffic_control._ConfigureClass('del', config)
# Assert class is deleted.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
def testAddSubQdisc(self):
"""Checks adding a sub qdisc to existing class."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000,
'latency': 250,
'loss': 5
}
qdisc_re_detail = ('qdisc netem %x: parent 1:%x .* delay %d.0ms loss %d%%' %
(config['port'], config['port'], config['latency'],
config['loss']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert qdisc does not exist prior to adding it.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertEqual(handle_id_re, None)
# Add qdisc to class.
traffic_control._AddSubQdisc(config)
# Assert qdisc is added.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertNotEqual(handle_id_re, None)
def testAddDeleteFilter(self):
config = {
'interface': self._INTERFACE,
'port': 12345,
'bandwidth': 2000
}
# Assert no filter exists.
command = ['tc', 'filter', 'list', 'dev', config['interface'], 'parent',
'1:0']
output = traffic_control._Exec(command)
self.assertEqual(output, '')
# Create the root and class to which the filter will be attached.
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Add the filter.
traffic_control._AddFilter(config['interface'], config['port'])
handle_id = traffic_control._GetFilterHandleId(config['interface'],
config['port'])
self.assertNotEqual(handle_id, None)
# Delete the filter.
# The output of tc filter list is not None because tc adds default filters.
traffic_control._DeleteFilter(config['interface'], config['port'])
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._GetFilterHandleId, config['interface'],
config['port'])
if __name__ == '__main__':
unittest.main()
|
payeezy/payeezy-deprecated | refs/heads/master | python/Dependancy/requests-master/requests/packages/chardet/hebrewprober.py | 2928 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
MonicaHsu/truvaluation | refs/heads/master | venv/lib/python2.7/plat-mac/FrameWork.py | 40 | "A sort of application framework for the Mac"
DEBUG=0
from warnings import warnpy3k
warnpy3k("In 3.x, the FrameWork module is removed.", stacklevel=2)
import MacOS
import traceback
from Carbon.AE import *
from Carbon.AppleEvents import *
from Carbon.Ctl import *
from Carbon.Controls import *
from Carbon.Dlg import *
from Carbon.Dialogs import *
from Carbon.Evt import *
from Carbon.Events import *
from Carbon.Help import *
from Carbon.Menu import *
from Carbon.Menus import *
from Carbon.Qd import *
from Carbon.QuickDraw import *
#from Carbon.Res import *
#from Carbon.Resources import *
#from Carbon.Snd import *
#from Carbon.Sound import *
from Carbon.Win import *
from Carbon.Windows import *
import types
import EasyDialogs
try:
MyFrontWindow = FrontNonFloatingWindow
except NameError:
MyFrontWindow = FrontWindow
kHighLevelEvent = 23 # Don't know what header file this should come from
SCROLLBARWIDTH = 16 # Again, not a clue...
# Trick to forestall a set of SIOUX menus being added to our menubar
SIOUX_APPLEMENU_ID=32000
# Map event 'what' field to strings
eventname = {}
eventname[1] = 'mouseDown'
eventname[2] = 'mouseUp'
eventname[3] = 'keyDown'
eventname[4] = 'keyUp'
eventname[5] = 'autoKey'
eventname[6] = 'updateEvt'
eventname[7] = 'diskEvt'
eventname[8] = 'activateEvt'
eventname[15] = 'osEvt'
eventname[23] = 'kHighLevelEvent'
# Map part codes returned by WhichWindow() to strings
partname = {}
partname[0] = 'inDesk'
partname[1] = 'inMenuBar'
partname[2] = 'inSysWindow'
partname[3] = 'inContent'
partname[4] = 'inDrag'
partname[5] = 'inGrow'
partname[6] = 'inGoAway'
partname[7] = 'inZoomIn'
partname[8] = 'inZoomOut'
#
# The useable portion of the screen
# ## but what happens with multiple screens? jvr
screenbounds = GetQDGlobalsScreenBits().bounds
screenbounds = screenbounds[0]+4, screenbounds[1]+4, \
screenbounds[2]-4, screenbounds[3]-4
next_window_x = 16 # jvr
next_window_y = 44 # jvr
def windowbounds(width, height):
"Return sensible window bounds"
global next_window_x, next_window_y
r, b = next_window_x+width, next_window_y+height
if r > screenbounds[2]:
next_window_x = 16
if b > screenbounds[3]:
next_window_y = 44
l, t = next_window_x, next_window_y
r, b = next_window_x+width, next_window_y+height
next_window_x, next_window_y = next_window_x + 8, next_window_y + 20 # jvr
return l, t, r, b
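# Example: successive 300x200 windows cascade by the (8, 20) step above,
# e.g. windowbounds(300, 200) -> (16, 44, 316, 244), then (24, 64, 324, 264).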
_watch = None
def setwatchcursor():
global _watch
if _watch is None:
_watch = GetCursor(4).data
SetCursor(_watch)
def setarrowcursor():
SetCursor(GetQDGlobalsArrow())
class Application:
"Application framework -- your application should be a derived class"
def __init__(self, nomenubar=0):
self._doing_asyncevents = 0
self.quitting = 0
self.needmenubarredraw = 0
self._windows = {}
self._helpmenu = None
if nomenubar:
self.menubar = None
else:
self.makemenubar()
def __del__(self):
if self._doing_asyncevents:
self._doing_asyncevents = 0
MacOS.SetEventHandler()
def makemenubar(self):
self.menubar = MenuBar(self)
AppleMenu(self.menubar, self.getabouttext(), self.do_about)
self.makeusermenus()
def makeusermenus(self):
self.filemenu = m = Menu(self.menubar, "File")
self._quititem = MenuItem(m, "Quit", "Q", self._quit)
def gethelpmenu(self):
if self._helpmenu is None:
self._helpmenu = HelpMenu(self.menubar)
return self._helpmenu
def _quit(self, *args):
self.quitting = 1
def cleanup(self):
for w in self._windows.values():
w.do_close()
return self._windows == {}
def appendwindow(self, wid, window):
self._windows[wid] = window
def removewindow(self, wid):
del self._windows[wid]
def getabouttext(self):
return "About %s..." % self.__class__.__name__
def do_about(self, id, item, window, event):
EasyDialogs.Message("Hello, world!" + "\015(%s)" % self.__class__.__name__)
# The main event loop is broken up in several simple steps.
# This is done so you can override each individual part,
# if you have a need to do extra processing independent of the
# event type.
# Normally, however, you'd just define handlers for individual
# events.
schedparams = (0, 0) # By default disable Python's event handling
default_wait = None # By default we wait GetCaretTime in WaitNextEvent
def mainloop(self, mask = everyEvent, wait = None):
self.quitting = 0
if hasattr(MacOS, 'SchedParams'):
saveparams = MacOS.SchedParams(*self.schedparams)
try:
while not self.quitting:
try:
self.do1event(mask, wait)
except (Application, SystemExit):
# Note: the raising of "self" is old-fashioned idiom to
# exit the mainloop. Calling _quit() is better for new
# applications.
break
finally:
if hasattr(MacOS, 'SchedParams'):
MacOS.SchedParams(*saveparams)
def dopendingevents(self, mask = everyEvent):
"""dopendingevents - Handle all pending events"""
while self.do1event(mask, wait=0):
pass
def do1event(self, mask = everyEvent, wait = None):
ok, event = self.getevent(mask, wait)
if IsDialogEvent(event):
if self.do_dialogevent(event):
return
if ok:
self.dispatch(event)
else:
self.idle(event)
def idle(self, event):
pass
def getevent(self, mask = everyEvent, wait = None):
if self.needmenubarredraw:
DrawMenuBar()
self.needmenubarredraw = 0
if wait is None:
wait = self.default_wait
if wait is None:
wait = GetCaretTime()
ok, event = WaitNextEvent(mask, wait)
return ok, event
def dispatch(self, event):
# The following appears to be double work (already done in do1event)
# but we need it for asynchronous event handling
if IsDialogEvent(event):
if self.do_dialogevent(event):
return
(what, message, when, where, modifiers) = event
if what in eventname:
name = "do_" + eventname[what]
else:
name = "do_%d" % what
try:
handler = getattr(self, name)
except AttributeError:
handler = self.do_unknownevent
handler(event)
def asyncevents(self, onoff):
"""asyncevents - Set asynchronous event handling on or off"""
if MacOS.runtimemodel == 'macho':
            raise NotImplementedError('Unsupported in MachoPython')
old = self._doing_asyncevents
if old:
MacOS.SetEventHandler()
MacOS.SchedParams(*self.schedparams)
if onoff:
MacOS.SetEventHandler(self.dispatch)
doint, dummymask, benice, howoften, bgyield = \
self.schedparams
MacOS.SchedParams(doint, everyEvent, benice,
howoften, bgyield)
self._doing_asyncevents = onoff
return old
def do_dialogevent(self, event):
gotone, dlg, item = DialogSelect(event)
if gotone:
window = dlg.GetDialogWindow()
if window in self._windows:
self._windows[window].do_itemhit(item, event)
else:
print 'Dialog event for unknown dialog'
return 1
return 0
def do_mouseDown(self, event):
(what, message, when, where, modifiers) = event
partcode, wid = FindWindow(where)
#
# Find the correct name.
#
if partcode in partname:
name = "do_" + partname[partcode]
else:
name = "do_%d" % partcode
if wid is None:
# No window, or a non-python window
try:
handler = getattr(self, name)
except AttributeError:
# Not menubar or something, so assume someone
# else's window
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
return
elif wid in self._windows:
# It is a window. Hand off to correct window.
window = self._windows[wid]
try:
handler = getattr(window, name)
except AttributeError:
handler = self.do_unknownpartcode
else:
# It is a python-toolbox window, but not ours.
handler = self.do_unknownwindow
handler(partcode, wid, event)
def do_inSysWindow(self, partcode, window, event):
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_inDesk(self, partcode, window, event):
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_inMenuBar(self, partcode, window, event):
if not self.menubar:
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
return
(what, message, when, where, modifiers) = event
result = MenuSelect(where)
id = (result>>16) & 0xffff # Hi word
if id >= 0x8000:
id = -65536 + id
item = result & 0xffff # Lo word
self.do_rawmenu(id, item, window, event)
def do_rawmenu(self, id, item, window, event):
try:
self.do_menu(id, item, window, event)
finally:
HiliteMenu(0)
def do_menu(self, id, item, window, event):
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.menubar.dispatch(id, item, window, event)
def do_unknownpartcode(self, partcode, window, event):
(what, message, when, where, modifiers) = event
if DEBUG: print "Mouse down at global:", where
if DEBUG: print "\tUnknown part code:", partcode
if DEBUG: print "\tEvent:", self.printevent(event)
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_unknownwindow(self, partcode, window, event):
if DEBUG: print 'Unknown window:', window
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_keyDown(self, event):
self.do_key(event)
def do_autoKey(self, event):
if not event[-1] & cmdKey:
self.do_key(event)
def do_key(self, event):
(what, message, when, where, modifiers) = event
c = chr(message & charCodeMask)
if self.menubar:
result = MenuEvent(event)
id = (result>>16) & 0xffff # Hi word
item = result & 0xffff # Lo word
if id:
self.do_rawmenu(id, item, None, event)
return
# Otherwise we fall-through
if modifiers & cmdKey:
if c == '.':
raise self
else:
if not self.menubar:
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
return
else:
# See whether the front window wants it
w = MyFrontWindow()
if w and w in self._windows:
window = self._windows[w]
try:
do_char = window.do_char
except AttributeError:
do_char = self.do_char
do_char(c, event)
# else it wasn't for us, sigh...
def do_char(self, c, event):
if DEBUG: print "Character", repr(c)
def do_updateEvt(self, event):
(what, message, when, where, modifiers) = event
wid = WhichWindow(message)
if wid and wid in self._windows:
window = self._windows[wid]
window.do_rawupdate(wid, event)
else:
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_activateEvt(self, event):
(what, message, when, where, modifiers) = event
wid = WhichWindow(message)
if wid and wid in self._windows:
window = self._windows[wid]
window.do_activate(modifiers & 1, event)
else:
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_osEvt(self, event):
(what, message, when, where, modifiers) = event
which = (message >> 24) & 0xff
if which == 1: # suspend/resume
self.do_suspendresume(event)
else:
if DEBUG:
print 'unknown osEvt:',
self.printevent(event)
def do_suspendresume(self, event):
(what, message, when, where, modifiers) = event
wid = MyFrontWindow()
if wid and wid in self._windows:
window = self._windows[wid]
window.do_activate(message & 1, event)
def do_kHighLevelEvent(self, event):
(what, message, when, where, modifiers) = event
if DEBUG:
print "High Level Event:",
self.printevent(event)
try:
AEProcessAppleEvent(event)
except:
pass
#print "AEProcessAppleEvent error:"
#traceback.print_exc()
def do_unknownevent(self, event):
if DEBUG:
print "Unhandled event:",
self.printevent(event)
def printevent(self, event):
(what, message, when, where, modifiers) = event
nicewhat = repr(what)
if what in eventname:
nicewhat = eventname[what]
print nicewhat,
if what == kHighLevelEvent:
h, v = where
print repr(ostypecode(message)), hex(when), repr(ostypecode(h | (v<<16))),
else:
print hex(message), hex(when), where,
print hex(modifiers)
class MenuBar:
"""Represent a set of menus in a menu bar.
Interface:
- (constructor)
- (destructor)
- addmenu
- addpopup (normally used internally)
- dispatch (called from Application)
"""
nextid = 1 # Necessarily a class variable
def getnextid(self):
id = MenuBar.nextid
MenuBar.nextid = id+1
return id
def __init__(self, parent=None):
self.parent = parent
ClearMenuBar()
self.bar = GetMenuBar()
self.menus = {}
# XXX necessary?
def close(self):
self.parent = None
self.bar = None
self.menus = None
def addmenu(self, title, after = 0, id=None):
if id is None:
id = self.getnextid()
if DEBUG: print 'Newmenu', title, id # XXXX
m = NewMenu(id, title)
m.InsertMenu(after)
if after >= 0:
if self.parent:
self.parent.needmenubarredraw = 1
else:
DrawMenuBar()
return id, m
def delmenu(self, id):
if DEBUG: print 'Delmenu', id # XXXX
DeleteMenu(id)
def addpopup(self, title = ''):
return self.addmenu(title, -1)
# Useless:
# def install(self):
# if not self.bar: return
# SetMenuBar(self.bar)
# if self.parent:
# self.parent.needmenubarredraw = 1
# else:
# DrawMenuBar()
def fixmenudimstate(self):
for m in self.menus.keys():
menu = self.menus[m]
            if menu.__class__ == AppleMenu:
continue
for i in range(len(menu.items)):
label, shortcut, callback, kind = menu.items[i]
if type(callback) == types.StringType:
wid = MyFrontWindow()
if wid and wid in self.parent._windows:
window = self.parent._windows[wid]
if hasattr(window, "domenu_" + callback):
menu.menu.EnableMenuItem(i + 1)
elif hasattr(self.parent, "domenu_" + callback):
menu.menu.EnableMenuItem(i + 1)
else:
menu.menu.DisableMenuItem(i + 1)
elif hasattr(self.parent, "domenu_" + callback):
menu.menu.EnableMenuItem(i + 1)
else:
menu.menu.DisableMenuItem(i + 1)
elif callback:
pass
def dispatch(self, id, item, window, event):
if id in self.menus:
self.menus[id].dispatch(id, item, window, event)
else:
if DEBUG: print "MenuBar.dispatch(%d, %d, %s, %s)" % \
(id, item, window, event)
# XXX Need a way to get menus as resources and bind them to callbacks
class Menu:
"One menu."
def __init__(self, bar, title, after=0, id=None):
self.bar = bar
self.id, self.menu = self.bar.addmenu(title, after, id)
bar.menus[self.id] = self
self.items = []
self._parent = None
def delete(self):
self.bar.delmenu(self.id)
del self.bar.menus[self.id]
self.menu.DisposeMenu()
del self.bar
del self.items
del self.menu
del self.id
del self._parent
def additem(self, label, shortcut=None, callback=None, kind=None):
self.menu.AppendMenu('x') # add a dummy string
self.items.append((label, shortcut, callback, kind))
item = len(self.items)
if isinstance(label, unicode):
self.menu.SetMenuItemTextWithCFString(item, label)
else:
self.menu.SetMenuItemText(item, label)
if shortcut and type(shortcut) == type(()):
modifiers, char = shortcut[:2]
self.menu.SetItemCmd(item, ord(char))
self.menu.SetMenuItemModifiers(item, modifiers)
if len(shortcut) > 2:
self.menu.SetMenuItemKeyGlyph(item, shortcut[2])
elif shortcut:
self.menu.SetItemCmd(item, ord(shortcut))
return item
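    # Shortcut forms accepted above (cf. MenuItem(m, "Quit", "Q", ...) in
    # Application.makeusermenus): a plain character binds Cmd-<char>, while
    # a (modifiers, char) or (modifiers, char, glyph) tuple also sets a
    # modifier mask and, optionally, a keyboard glyph.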
def delitem(self, item):
if item != len(self.items):
            raise ValueError('Can only delete last item of a menu')
self.menu.DeleteMenuItem(item)
del self.items[item-1]
def addcheck(self, label, shortcut=None, callback=None):
return self.additem(label, shortcut, callback, 'check')
def addradio(self, label, shortcut=None, callback=None):
return self.additem(label, shortcut, callback, 'radio')
def addseparator(self):
self.menu.AppendMenu('(-')
self.items.append(('', None, None, 'separator'))
def addsubmenu(self, label, title=''):
sub = Menu(self.bar, title, -1)
item = self.additem(label, '\x1B', None, 'submenu')
self.menu.SetItemMark(item, sub.id)
sub._parent = self
sub._parent_item = item
return sub
def dispatch(self, id, item, window, event):
title, shortcut, callback, mtype = self.items[item-1]
if callback:
if not self.bar.parent or type(callback) != types.StringType:
menuhandler = callback
else:
# callback is string
wid = MyFrontWindow()
if wid and wid in self.bar.parent._windows:
window = self.bar.parent._windows[wid]
if hasattr(window, "domenu_" + callback):
menuhandler = getattr(window, "domenu_" + callback)
elif hasattr(self.bar.parent, "domenu_" + callback):
menuhandler = getattr(self.bar.parent, "domenu_" + callback)
else:
# nothing we can do. we shouldn't have come this far
# since the menu item should have been disabled...
return
elif hasattr(self.bar.parent, "domenu_" + callback):
menuhandler = getattr(self.bar.parent, "domenu_" + callback)
else:
# nothing we can do. we shouldn't have come this far
# since the menu item should have been disabled...
return
menuhandler(id, item, window, event)
def enable(self, onoff):
if onoff:
self.menu.EnableMenuItem(0)
if self._parent:
self._parent.menu.EnableMenuItem(self._parent_item)
else:
self.menu.DisableMenuItem(0)
if self._parent:
self._parent.menu.DisableMenuItem(self._parent_item)
if self.bar and self.bar.parent:
self.bar.parent.needmenubarredraw = 1
class PopupMenu(Menu):
def __init__(self, bar):
Menu.__init__(self, bar, '(popup)', -1)
def popup(self, x, y, event, default=1, window=None):
# NOTE that x and y are global coordinates, and they should probably
# be topleft of the button the user clicked (not mouse-coordinates),
# so the popup nicely overlaps.
reply = self.menu.PopUpMenuSelect(x, y, default)
if not reply:
return
id = (reply >> 16) & 0xffff
item = reply & 0xffff
if not window:
wid = MyFrontWindow()
try:
window = self.bar.parent._windows[wid]
except:
pass # If we can't find the window we pass None
self.dispatch(id, item, window, event)
class MenuItem:
def __init__(self, menu, title, shortcut=None, callback=None, kind=None):
self.item = menu.additem(title, shortcut, callback)
self.menu = menu
def delete(self):
self.menu.delitem(self.item)
del self.menu
del self.item
def check(self, onoff):
self.menu.menu.CheckMenuItem(self.item, onoff)
def enable(self, onoff):
if onoff:
self.menu.menu.EnableMenuItem(self.item)
else:
self.menu.menu.DisableMenuItem(self.item)
def settext(self, text):
self.menu.menu.SetMenuItemText(self.item, text)
def setstyle(self, style):
self.menu.menu.SetItemStyle(self.item, style)
def seticon(self, icon):
self.menu.menu.SetItemIcon(self.item, icon)
def setcmd(self, cmd):
self.menu.menu.SetItemCmd(self.item, cmd)
def setmark(self, cmd):
self.menu.menu.SetItemMark(self.item, cmd)
class RadioItem(MenuItem):
def __init__(self, menu, title, shortcut=None, callback=None):
MenuItem.__init__(self, menu, title, shortcut, callback, 'radio')
class CheckItem(MenuItem):
def __init__(self, menu, title, shortcut=None, callback=None):
MenuItem.__init__(self, menu, title, shortcut, callback, 'check')
def Separator(menu):
menu.addseparator()
def SubMenu(menu, label, title=''):
return menu.addsubmenu(label, title)
class AppleMenu(Menu):
def __init__(self, bar, abouttext="About me...", aboutcallback=None):
Menu.__init__(self, bar, "\024", id=SIOUX_APPLEMENU_ID)
if MacOS.runtimemodel == 'ppc':
self.additem(abouttext, None, aboutcallback)
self.addseparator()
self.menu.AppendResMenu('DRVR')
else:
# Additem()'s tricks do not work for "apple" menu under Carbon
self.menu.InsertMenuItem(abouttext, 0)
self.items.append((abouttext, None, aboutcallback, None))
def dispatch(self, id, item, window, event):
if item == 1:
Menu.dispatch(self, id, item, window, event)
elif MacOS.runtimemodel == 'ppc':
name = self.menu.GetMenuItemText(item)
OpenDeskAcc(name)
class HelpMenu(Menu):
def __init__(self, bar):
# Note we don't call Menu.__init__, we do the necessary things by hand
self.bar = bar
self.menu, index = HMGetHelpMenu()
self.id = self.menu.GetMenuID()
bar.menus[self.id] = self
# The next line caters for the entries the system already handles for us
self.items = [None]*(index-1)
self._parent = None
class Window:
"""A single window belonging to an application"""
def __init__(self, parent):
self.wid = None
self.parent = parent
def open(self, bounds=(40, 40, 400, 400), resid=None):
if resid != None:
self.wid = GetNewWindow(resid, -1)
else:
self.wid = NewWindow(bounds, self.__class__.__name__, 1,
8, -1, 1, 0) # changed to proc id 8 to include zoom box. jvr
self.do_postopen()
def do_postopen(self):
"""Tell our parent we exist"""
self.parent.appendwindow(self.wid, self)
def close(self):
self.do_postclose()
def do_postclose(self):
self.parent.removewindow(self.wid)
self.parent = None
self.wid = None
def SetPort(self):
        # Convenience method
SetPort(self.wid)
def GetWindow(self):
return self.wid
def do_inDrag(self, partcode, window, event):
where = event[3]
window.DragWindow(where, self.draglimit)
draglimit = screenbounds
def do_inGoAway(self, partcode, window, event):
where = event[3]
if window.TrackGoAway(where):
self.close()
def do_inZoom(self, partcode, window, event):
(what, message, when, where, modifiers) = event
if window.TrackBox(where, partcode):
window.ZoomWindow(partcode, 1)
rect = window.GetWindowUserState() # so that zoom really works... jvr
self.do_postresize(rect[2] - rect[0], rect[3] - rect[1], window) # jvr
def do_inZoomIn(self, partcode, window, event):
SetPort(window) # !!!
self.do_inZoom(partcode, window, event)
def do_inZoomOut(self, partcode, window, event):
SetPort(window) # !!!
self.do_inZoom(partcode, window, event)
def do_inGrow(self, partcode, window, event):
(what, message, when, where, modifiers) = event
result = window.GrowWindow(where, self.growlimit)
if result:
height = (result>>16) & 0xffff # Hi word
width = result & 0xffff # Lo word
self.do_resize(width, height, window)
growlimit = (50, 50, screenbounds[2] - screenbounds[0], screenbounds[3] - screenbounds[1]) # jvr
def do_resize(self, width, height, window):
l, t, r, b = self.wid.GetWindowPort().GetPortBounds() # jvr, forGrowIcon
self.SetPort() # jvr
self.wid.InvalWindowRect((r - SCROLLBARWIDTH + 1, b - SCROLLBARWIDTH + 1, r, b)) # jvr
window.SizeWindow(width, height, 1) # changed updateFlag to true jvr
self.do_postresize(width, height, window)
def do_postresize(self, width, height, window):
SetPort(window)
self.wid.InvalWindowRect(window.GetWindowPort().GetPortBounds())
def do_inContent(self, partcode, window, event):
#
# If we're not frontmost, select ourselves and wait for
# the activate event.
#
if MyFrontWindow() != window:
window.SelectWindow()
return
# We are. Handle the event.
(what, message, when, where, modifiers) = event
SetPort(window)
local = GlobalToLocal(where)
self.do_contentclick(local, modifiers, event)
def do_contentclick(self, local, modifiers, event):
if DEBUG:
print 'Click in contents at %s, modifiers %s'%(local, modifiers)
def do_rawupdate(self, window, event):
if DEBUG: print "raw update for", window
SetPort(window)
window.BeginUpdate()
self.do_update(window, event)
window.EndUpdate()
def do_update(self, window, event):
if DEBUG:
import time
for i in range(8):
time.sleep(0.1)
InvertRgn(window.GetWindowPort().visRgn)
FillRgn(window.GetWindowPort().visRgn, GetQDGlobalsGray())
else:
EraseRgn(window.GetWindowPort().visRgn)
def do_activate(self, activate, event):
if DEBUG: print 'Activate %d for %s'%(activate, self.wid)
class ControlsWindow(Window):
def do_rawupdate(self, window, event):
if DEBUG: print "raw update for", window
SetPort(window)
window.BeginUpdate()
self.do_update(window, event)
#DrawControls(window) # jvr
UpdateControls(window, window.GetWindowPort().visRgn) # jvr
window.DrawGrowIcon()
window.EndUpdate()
def do_controlhit(self, window, control, pcode, event):
if DEBUG: print "control hit in", window, "on", control, "; pcode =", pcode
def do_inContent(self, partcode, window, event):
if MyFrontWindow() != window:
window.SelectWindow()
return
(what, message, when, where, modifiers) = event
SetPort(window) # XXXX Needed?
local = GlobalToLocal(where)
pcode, control = FindControl(local, window)
if pcode and control:
self.do_rawcontrolhit(window, control, pcode, local, event)
else:
if DEBUG: print "FindControl(%s, %s) -> (%s, %s)" % \
(local, window, pcode, control)
self.do_contentclick(local, modifiers, event)
def do_rawcontrolhit(self, window, control, pcode, local, event):
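        # TrackControl follows the mouse until release and returns the part
        # code the user released over (0 when released outside the control).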
pcode = control.TrackControl(local)
if pcode:
self.do_controlhit(window, control, pcode, event)
class ScrolledWindow(ControlsWindow):
def __init__(self, parent):
self.barx = self.bary = None
self.barx_enabled = self.bary_enabled = 1
self.activated = 1
ControlsWindow.__init__(self, parent)
def scrollbars(self, wantx=1, wanty=1):
SetPort(self.wid)
self.barx = self.bary = None
self.barx_enabled = self.bary_enabled = 1
x0, y0, x1, y1 = self.wid.GetWindowPort().GetPortBounds()
vx, vy = self.getscrollbarvalues()
if vx is None: self.barx_enabled, vx = 0, 0
if vy is None: self.bary_enabled, vy = 0, 0
if wantx:
rect = x0-1, y1-(SCROLLBARWIDTH-1), x1-(SCROLLBARWIDTH-2), y1+1
self.barx = NewControl(self.wid, rect, "", 1, vx, 0, 32767, 16, 0)
if not self.barx_enabled: self.barx.HiliteControl(255)
## self.wid.InvalWindowRect(rect)
if wanty:
rect = x1-(SCROLLBARWIDTH-1), y0-1, x1+1, y1-(SCROLLBARWIDTH-2)
self.bary = NewControl(self.wid, rect, "", 1, vy, 0, 32767, 16, 0)
if not self.bary_enabled: self.bary.HiliteControl(255)
## self.wid.InvalWindowRect(rect)
def do_postclose(self):
self.barx = self.bary = None
ControlsWindow.do_postclose(self)
def do_activate(self, onoff, event):
self.activated = onoff
if onoff:
if self.barx and self.barx_enabled:
self.barx.ShowControl() # jvr
if self.bary and self.bary_enabled:
self.bary.ShowControl() # jvr
else:
if self.barx:
self.barx.HideControl() # jvr; An inactive window should have *hidden*
# scrollbars, not just dimmed (no matter what
# BBEdit does... look at the Finder)
if self.bary:
self.bary.HideControl() # jvr
self.wid.DrawGrowIcon() # jvr
def do_postresize(self, width, height, window):
l, t, r, b = self.wid.GetWindowPort().GetPortBounds()
self.SetPort()
if self.barx:
self.barx.HideControl() # jvr
self.barx.MoveControl(l-1, b-(SCROLLBARWIDTH-1))
self.barx.SizeControl((r-l)-(SCROLLBARWIDTH-3), SCROLLBARWIDTH) # jvr
if self.bary:
self.bary.HideControl() # jvr
self.bary.MoveControl(r-(SCROLLBARWIDTH-1), t-1)
self.bary.SizeControl(SCROLLBARWIDTH, (b-t)-(SCROLLBARWIDTH-3)) # jvr
if self.barx:
self.barx.ShowControl() # jvr
self.wid.ValidWindowRect((l, b - SCROLLBARWIDTH + 1, r - SCROLLBARWIDTH + 2, b)) # jvr
if self.bary:
self.bary.ShowControl() # jvr
self.wid.ValidWindowRect((r - SCROLLBARWIDTH + 1, t, r, b - SCROLLBARWIDTH + 2)) # jvr
self.wid.InvalWindowRect((r - SCROLLBARWIDTH + 1, b - SCROLLBARWIDTH + 1, r, b)) # jvr, growicon
def do_rawcontrolhit(self, window, control, pcode, local, event):
if control == self.barx:
which = 'x'
elif control == self.bary:
which = 'y'
else:
return 0
if pcode in (inUpButton, inDownButton, inPageUp, inPageDown):
# We do the work for the buttons and grey area in the tracker
dummy = control.TrackControl(local, self.do_controltrack)
else:
# but the thumb is handled here
pcode = control.TrackControl(local)
if pcode == inThumb:
value = control.GetControlValue()
print 'setbars', which, value #DBG
self.scrollbar_callback(which, 'set', value)
self.updatescrollbars()
else:
print 'funny part', pcode #DBG
return 1
def do_controltrack(self, control, pcode):
if control == self.barx:
which = 'x'
elif control == self.bary:
which = 'y'
else:
return
if pcode == inUpButton:
what = '-'
elif pcode == inDownButton:
what = '+'
elif pcode == inPageUp:
what = '--'
elif pcode == inPageDown:
what = '++'
else:
return
self.scrollbar_callback(which, what, None)
self.updatescrollbars()
def updatescrollbars(self):
SetPort(self.wid)
vx, vy = self.getscrollbarvalues()
if self.barx:
if vx is None:
self.barx.HiliteControl(255)
self.barx_enabled = 0
else:
if not self.barx_enabled:
self.barx_enabled = 1
if self.activated:
self.barx.HiliteControl(0)
self.barx.SetControlValue(vx)
if self.bary:
if vy is None:
self.bary.HiliteControl(255)
self.bary_enabled = 0
else:
if not self.bary_enabled:
self.bary_enabled = 1
if self.activated:
self.bary.HiliteControl(0)
self.bary.SetControlValue(vy)
# Auxiliary function: convert standard text/image/etc coordinate
# to something palatable as getscrollbarvalues() return
def scalebarvalue(self, absmin, absmax, curmin, curmax):
if curmin <= absmin and curmax >= absmax:
return None
if curmin <= absmin:
return 0
if curmax >= absmax:
return 32767
perc = float(curmin-absmin)/float(absmax-absmin)
return int(perc*32767)
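    # e.g. scalebarvalue(0, 1000, 250, 500) -> int(0.25 * 32767) == 8191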
# To be overridden:
def getscrollbarvalues(self):
return 0, 0
def scrollbar_callback(self, which, what, value):
print 'scroll', which, what, value
class DialogWindow(Window):
"""A modeless dialog window"""
def open(self, resid):
self.dlg = GetNewDialog(resid, -1)
self.wid = self.dlg.GetDialogWindow()
self.do_postopen()
def close(self):
self.do_postclose()
def do_postclose(self):
self.dlg = None
Window.do_postclose(self)
def do_itemhit(self, item, event):
print 'Dialog %s, item %d hit'%(self.dlg, item)
def do_rawupdate(self, window, event):
pass
def ostypecode(x):
"Convert a long int to the 4-character code it really is"
s = ''
for i in range(4):
x, c = divmod(x, 256)
s = chr(c) + s
return s
class TestApp(Application):
"This class is used by the test() function"
def makeusermenus(self):
self.filemenu = m = Menu(self.menubar, "File")
self.saveitem = MenuItem(m, "Save", "S", self.save)
Separator(m)
self.optionsmenu = mm = SubMenu(m, "Options")
self.opt1 = CheckItem(mm, "Arguments", "A")
self.opt2 = CheckItem(mm, "Being hit on the head lessons", (kMenuOptionModifier, "A"))
self.opt3 = CheckItem(mm, "Complaints", (kMenuOptionModifier|kMenuNoCommandModifier, "A"))
Separator(m)
self.itemeh = MenuItem(m, "Enable Help", None, self.enablehelp)
self.itemdbg = MenuItem(m, "Debug", None, self.debug)
Separator(m)
self.quititem = MenuItem(m, "Quit", "Q", self.quit)
def save(self, *args):
print "Save"
def quit(self, *args):
raise self
def enablehelp(self, *args):
hm = self.gethelpmenu()
self.nohelpitem = MenuItem(hm, "There isn't any", None, self.nohelp)
def nohelp(self, *args):
print "I told you there isn't any!"
def debug(self, *args):
import pdb
pdb.set_trace()
def test():
"Test program"
app = TestApp()
app.mainloop()
if __name__ == '__main__':
test()
|
Mlieou/lXXtcode | refs/heads/master | leetcode/python/ex_531.py | 3 | class Solution(object):
def findLonelyPixel(self, picture):
"""
:type picture: List[List[str]]
:rtype: int
"""
if not picture or not picture[0]: return 0
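        # First pass: count the black ('B') pixels in every row and column.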
row = [0] * len(picture)
col = [0] * len(picture[0])
for i in range(len(picture)):
for j in range(len(picture[0])):
if picture[i][j] == 'B':
row[i] += 1
col[j] += 1
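        # Second pass: a pixel is lonely when its row and its column each
        # contain exactly one 'B'.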
count = 0
for i in range(len(picture)):
if row[i] != 1: continue
for j in range(len(picture[0])):
if col[j] != 1: continue
if picture[i][j] == 'B':
count += 1
return count |
neumodisch/RIOT | refs/heads/master | dist/tools/pyterm/testbeds/testbeds.py | 100 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Philipp Rosenkranz <philipp.rosenkranz@fu-berlin.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os, re, datetime
from subprocess import call, Popen, PIPE
class Testbed():
log_dir_name = 'log'
def __init__(self):
pass
def initCleanWithFlash(self):
self.stop()
self.cleanLogs()
self.flashNodes()
self.start()
def initClean(self):
self.cleanLogs()
self.start()
def flashNodes(self):
raise NotImplementedError("Inherit from Testbed and implement flashNodes")
def cleanLogs(self):
raise NotImplementedError("Inherit from Testbed and implement flashNodes")
def archiveLogs(self, experiment = None):
raise NotImplementedError("Inherit from Testbed and implement flashNodes")
def start(self):
raise NotImplementedError("Inherit from Testbed and implement flashNodes")
def stop(self):
raise NotImplementedError("Inherit from Testbed and implement flashNodes")
def defaultArchivePostfix(self, experimentName = None):
if not experimentName:
experimentName = "unknown"
time = datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")
postfix = "-" + experimentName +"_" + time
return postfix
def printAndCall(self, cmdString):
print(cmdString)
call(cmdString, shell=True)
class DESTestbed(Testbed):
def __init__(self, serverHost = None, serverPort=None, userName = None, flasher = None,
hexfilePath = None, pyterm = None, logFilePath = None, hostFile = None):
self.serverHost = serverHost
self.serverPort = str(serverPort)
self.userName = userName
self.flasher = flasher
self.hexFilePath = hexfilePath
self.pyterm = pyterm
self.logFilePath = logFilePath
self.hostFile = hostFile
def flashNodes(self):
self.printAndCall("parallel-ssh -h %s -l %s 'python %s'" % (self.hostFile, self.userName, self.flasher))
def cleanLogs(self):
self.printAndCall("rm -rf %s/*.log" % (self.logFilePath))
def archiveLogs(self, postfix = None):
postfix = self.defaultArchivePostfix(postfix)
logDir = self.logFilePath.split("/")[-1]
self.printAndCall("cd %s/..; tar -cjf archived_logs%s.tar.bz2 %s/*.log" % (self.logFilePath, postfix, logDir))
def start(self):
self.printAndCall("parallel-ssh -h %s -l %s 'screen -S pyterm -d -m python %s -ln %s'" % (self.hostFile, self.userName, self.pyterm, self.log_dir_name))
def stop(self):
self.printAndCall("parallel-ssh -h %s -l %s 'screen -X -S pyterm quit'" % (self.hostFile, self.userName))
class LocalTestbed(Testbed):
def __init__(self, serverHost = None, serverPort=None, flasher = None, hexfilePath = None, pyterm = None, logFilePath = None):
self.serverHost = serverHost
self.serverPort = str(serverPort)
self.flasher = flasher
self.hexFilePath = hexfilePath
self.pyterm = pyterm
self.logFilePath = logFilePath
def findPorts(self):
devlist = os.listdir("/dev/")
regex = re.compile('^ttyUSB')
return sorted([port for port in devlist if regex.match(port)])
def flashNodes(self):
self.printAndCall("python %s %s" % (self.flasher, self.hexFilePath))
def cleanLogs(self):
self.printAndCall("rm -rf %s/*.log" % (self.logFilePath))
def archiveLogs(self, postfix = None):
postfix = self.defaultArchivePostfix(postfix)
logDir = self.logFilePath.split("/")[-1]
self.printAndCall("cd %s/..; tar -cjf archived_logs%s.tar.bz2 %s/*.log" % (self.logFilePath, postfix, logDir))
def start(self):
portList = self.findPorts()
for port in portList:
self.printAndCall("screen -S pyterm-%s -d -m python %s -H %s -rn %s -p /dev/%s -ln %s" % (port, self.pyterm, port, port, port, self.log_dir_name))
def stop(self):
portList = self.findPorts()
for port in portList:
self.printAndCall("screen -X -S pyterm-%s quit" % (port))
class DesVirtTestbed(Testbed):
def __init__(self, serverHost = None, serverPort=None, desvirtPath = None, topologyName = None, pyterm = None, logFilePath = None):
self.serverHost = serverHost
self.serverPort = str(serverPort)
self.desvirtPath = desvirtPath
self.topologyName = topologyName
self.pyterm = pyterm
self.logFilePath = logFilePath
self.namePortList = []
def findPorts(self):
return self.namePortList
def startDesVirtNetwork(self):
print "executing: " + "./vnet --start --name " + self.topologyName + " in: " + self.desvirtPath
call("sh -c \"./vnet --define --name " + self.topologyName + "\"", cwd=self.desvirtPath, shell=True)
stream = Popen("sh -c \"./vnet --start --name " + self.topologyName + "\"", cwd=self.desvirtPath, shell=True, stderr=PIPE).stderr
pats = r'.*riotnative.*\.elf (\S+) -t (\S+)'
pattern = re.compile(pats)
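        # Each matching stderr line names a riotnative instance and its port,
        # e.g. ".../riotnative/default.elf node1 -t 4711" (illustrative only).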
for line in stream:
match = pattern.match(line)
if(match):
tuple = match.groups()
self.namePortList.append((tuple[0], int(tuple[1])))
self.namePortList = sorted(self.namePortList)
for tuple in self.namePortList:
print "name: " + tuple[0] + " port: " + str(tuple[1])
def stopDesVirtNetwork(self):
call("sh -c \"./vnet --stop --name " + self.topologyName + "\"", cwd=self.desvirtPath, shell=True)
def flashNodes(self):
pass
def cleanLogs(self):
self.printAndCall("rm -rf %s/*.log" % (self.logFilePath))
def archiveLogs(self, postfix = None):
postfix = self.defaultArchivePostfix(postfix)
logDir = self.logFilePath.split("/")[-1]
self.printAndCall("cd %s/..; tar -cjf archived_logs%s.tar.bz2 %s/*.log" % (self.logFilePath, postfix, logDir))
def start(self):
for node in self.namePortList:
self.printAndCall("screen -S pyterm-%s -d -m python %s -H %s -rn %s -ts %s -ln %s" % (node[0], self.pyterm, node[0], node[0], node[1], self.log_dir_name))
def stop(self):
print "stop called"
for node in self.namePortList:
self.printAndCall("screen -X -S pyterm-%s quit" % (node[0]))
self.stopDesVirtNetwork()
|
MihaiMoldovanu/ansible | refs/heads/devel | lib/ansible/plugins/terminal/asa.py | 28 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    # Prompt and error output arrive as bytes, so every pattern must be a
    # bytes pattern; mixing text and bytes regexes here would never match.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
    ]
    terminal_stderr_re = [
        re.compile(br"error:", re.I),
        re.compile(br"Removing.* not allowed, it is being used")
    ]
def on_open_shell(self):
if self._get_prompt().strip().endswith(b'#'):
self.disable_pager()
def disable_pager(self):
try:
self._exec_cli_command(u'no terminal pager')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to disable terminal pager')
def on_authorize(self, passwd=None):
if self._get_prompt().strip().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
self.disable_pager()
|
victorbriz/rethinkdb | refs/heads/next | scripts/VirtuaBuild/smoke_test.py | 46 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
# usage: ./smoke_test.py --mode OS_NAME --num-keys SOME_NUMBER_HERE
import time, sys, os, socket, random, time, signal, subprocess
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'test', 'common')))
import driver, memcached_workload_common
from vcoptparse import *
op = OptParser()
op["num_keys"] = IntFlag("--num-keys", 500)
op["mode"] = StringFlag("--mode", "debug")
op["pkg_type"] = StringFlag("--pkg-type", "deb") # "deb" or "rpm"
opts = op.parse(sys.argv)
num_keys = opts["num_keys"]
base_port = 11213 # port that RethinkDB runs from by default
if opts["pkg_type"] == "rpm":
def install(path):
return "rpm -i %s --nodeps" % path
def get_binary(path):
return "rpm -qpil %s | grep /usr/bin" % path
def uninstall(cmd_name):
return "which %s | xargs readlink -f | xargs rpm -qf | xargs rpm -e" % cmd_name
elif opts["pkg_type"] == "deb":
def install(path):
return "dpkg -i %s" % path
def get_binary(path):
return "dpkg -c %s | grep /usr/bin/rethinkdb-.* | sed 's/^.*\(\/usr.*\)$/\\1/'" % path
def uninstall(cmd_name):
return "which %s | xargs readlink -f | xargs dpkg -S | sed 's/^\(.*\):.*$/\\1/' | xargs dpkg -r" % cmd_name
else:
print >>sys.stderr, "Error: Unknown package type."
exit(0)
def purge_installed_packages():
try:
old_binaries_raw = exec_command(["ls", "/usr/bin/rethinkdb*"], shell = True).stdout.readlines()
except Exception, e:
print "Nothing to remove."
return
old_binaries = map(lambda x: x.strip('\n'), old_binaries_raw)
print "Binaries scheduled for removal: ", old_binaries
try:
exec_command(uninstall(old_binaries[0]), shell = True)
except Exception, e:
exec_command('rm -f ' + old_binaries[0])
purge_installed_packages()
def exec_command(cmd, bg = False, shell = False):
if type(cmd) == type("") and not shell:
cmd = cmd.split(" ")
elif type(cmd) == type([]) and shell:
cmd = " ".join(cmd)
print cmd
if bg:
return subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell) # doesn't actually run in background: it just skips the waiting part
else:
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell)
proc.wait()
if proc.poll():
raise RuntimeError("Error: command ended with signal %d." % proc.poll())
return proc
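# exec_command("make") blocks until completion and raises on a non-zero exit;
# exec_command("rethinkdb", bg=True) returns the Popen handle immediately.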
def wait_until_started_up(proc, host, port, timeout = 600):
time_limit = time.time() + timeout
while time.time() < time_limit:
if proc.poll() is not None:
raise RuntimeError("Process stopped unexpectedly with return code %d." % proc.poll())
s = socket.socket()
try:
s.connect((host, port))
except socket.error, e:
time.sleep(1)
else:
break
finally:
s.close()
else:
raise RuntimeError("Could not connect to process.")
def test_against(host, port, timeout = 600):
with memcached_workload_common.make_memcache_connection({"address": (host, port), "mclib": "pylibmc", "protocol": "text"}) as mc:
temp = 0
time_limit = time.time() + timeout
while not temp and time.time() < time_limit:
try:
temp = mc.set("test", "test")
print temp
except Exception, e:
print e
pass
time.sleep(1)
goodsets = 0
goodgets = 0
for i in range(num_keys):
try:
if mc.set(str(i), str(i)):
goodsets += 1
except:
pass
for i in range(num_keys):
try:
if mc.get(str(i)) == str(i):
goodgets += 1
except:
pass
return goodsets, goodgets
cur_dir = exec_command("pwd").stdout.readline().strip('\n')
p = exec_command("find build/%s -name *.%s" % (opts["mode"], opts["pkg_type"]))
raw = p.stdout.readlines()
res_paths = map(lambda x: os.path.join(cur_dir, x.strip('\n')), raw)
print "Packages to install:", res_paths
failed_test = False
for path in res_paths:
print "TESTING A NEW PACKAGE"
print "Uninstalling old packages..."
purge_installed_packages()
print "Done uninstalling..."
print "Installing RethinkDB..."
target_binary_name = exec_command(get_binary(path), shell = True).stdout.readlines()[0].strip('\n')
print "Target binary name:", target_binary_name
exec_command(install(path))
print "Starting RethinkDB..."
exec_command("rm -rf rethinkdb_data")
exec_command("rm -f core.*")
proc = exec_command("rethinkdb", bg = True)
# gets the IP address
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("rethinkdb.com", 80))
ip = s.getsockname()[0]
s.close()
print "IP Address detected:", ip
wait_until_started_up(proc, ip, base_port)
print "Testing..."
res = test_against(ip, base_port)
print "Tests completed. Killing instance now..."
proc.send_signal(signal.SIGINT)
timeout = 60 # 1 minute to shut down
time_limit = time.time() + timeout
while proc.poll() is None and time.time() < time_limit:
pass
if proc.poll() != 0:
print "RethinkDB failed to shut down properly. (TEST FAILED)"
        failed_test = True
if res != (num_keys, num_keys):
print "Done: FAILED"
print "Results: %d successful sets, %d successful gets (%d total)" % (res[0], res[1], num_keys)
failed_test = True
else:
print "Done: PASSED"
print "Done."
if failed_test:
exit(1)
else:
exit(0)
|
Ichag/openerp-server | refs/heads/master | openerp/tools/amount_to_text.py | 31 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#-------------------------------------------------------------
# French
#-------------------------------------------------------------
to_19_fr = ( 'zéro', 'un', 'deux', 'trois', 'quatre', 'cinq', 'six',
'sept', 'huit', 'neuf', 'dix', 'onze', 'douze', 'treize',
'quatorze', 'quinze', 'seize', 'dix-sept', 'dix-huit', 'dix-neuf' )
tens_fr = ( 'vingt', 'trente', 'quarante', 'Cinquante', 'Soixante', 'Soixante-dix', 'Quatre-vingts', 'Quatre-vingt Dix')
denom_fr = ( '',
'Mille', 'Millions', 'Milliards', 'Billions', 'Quadrillions',
'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
'Décillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Icosillion', 'Vigintillion' )
def _convert_nn_fr(val):
""" convert a value < 100 to French
"""
if val < 20:
return to_19_fr[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens_fr)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19_fr[val % 10]
return dcap
def _convert_nnn_fr(val):
""" convert a value < 1000 to french
special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19_fr[rem] + ' Cent'
if mod > 0:
word += ' '
if mod > 0:
word += _convert_nn_fr(mod)
return word
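# e.g. _convert_nnn_fr(245) -> 'deux Cent quarante-cinq'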
def french_number(val):
if val < 100:
return _convert_nn_fr(val)
if val < 1000:
return _convert_nnn_fr(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom_fr))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn_fr(l) + ' ' + denom_fr[didx]
if r > 0:
ret = ret + ', ' + french_number(r)
return ret
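# e.g. french_number(1654) -> 'un Mille, six Cent Cinquante-quatre'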
def amount_to_text_fr(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = french_number(abs(int(list[0])))
end_word = french_number(int(list[1]))
cents_number = int(list[1])
    cents_name = (cents_number > 1) and 'Cents' or 'Cent'
final_result = start_word +' '+units_name+' '+ end_word +' '+cents_name
return final_result
#-------------------------------------------------------------
# Dutch
#-------------------------------------------------------------
to_19_nl = ( 'Nul', 'Een', 'Twee', 'Drie', 'Vier', 'Vijf', 'Zes',
'Zeven', 'Acht', 'Negen', 'Tien', 'Elf', 'Twaalf', 'Dertien',
'Veertien', 'Vijftien', 'Zestien', 'Zeventien', 'Achttien', 'Negentien' )
tens_nl = ( 'Twintig', 'Dertig', 'Veertig', 'Vijftig', 'Zestig', 'Zeventig', 'Tachtig', 'Negentig')
denom_nl = ( '',
'Duizend', 'Miljoen', 'Miljard', 'Triljoen', 'Quadriljoen',
'Quintillion', 'Sextiljoen', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn_nl(val):
""" convert a value < 100 to Dutch
"""
if val < 20:
return to_19_nl[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens_nl)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19_nl[val % 10]
return dcap
def _convert_nnn_nl(val):
""" convert a value < 1000 to Dutch
special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19_nl[rem] + ' Honderd'
if mod > 0:
word += ' '
if mod > 0:
word += _convert_nn_nl(mod)
return word
def dutch_number(val):
if val < 100:
return _convert_nn_nl(val)
if val < 1000:
return _convert_nnn_nl(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom_nl))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn_nl(l) + ' ' + denom_nl[didx]
if r > 0:
ret = ret + ', ' + dutch_number(r)
return ret
def amount_to_text_nl(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = dutch_number(int(list[0]))
end_word = dutch_number(int(list[1]))
cents_number = int(list[1])
    cents_name = 'cent'  # 'cent' serves as both singular and plural here
final_result = start_word +' '+units_name+' '+ end_word +' '+cents_name
return final_result
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
_translate_funcs = {'fr' : amount_to_text_fr, 'nl' : amount_to_text_nl}
def add_amount_to_text_function(lang, func):
_translate_funcs[lang] = func
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='fr', currency='euro'):
""" Converts an integer to its textual representation, using the language set in the context if any.
Example::
1654: mille six cent cinquante-quatre.
"""
# if nbr > 1000000:
##TODO: use logger
# print "WARNING: number too large '%d', can't translate it!" % (nbr,)
# return str(nbr)
if not _translate_funcs.has_key(lang):
#TODO: use logger
print "WARNING: no translation function found for lang: '%s'" % (lang,)
#TODO: (default should be en) same as above
lang = 'fr'
return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
from sys import argv
lang = 'nl'
if len(argv) < 2:
for i in range(1,200):
print i, ">>", amount_to_text(i, lang)
for i in range(200,999999,139):
print i, ">>", amount_to_text(i, lang)
else:
print amount_to_text(int(argv[1]), lang)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Tivix/wagtail | refs/heads/master | wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py | 4 | from __future__ import unicode_literals
from django.conf import settings
from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
from django.template.defaultfilters import stringfilter
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction
from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script
from wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify
from wagtail.wagtailadmin.menu import admin_menu
from wagtail.utils.pagination import DEFAULT_PAGE_KEY
register = template.Library()
register.filter('intcomma', intcomma)
@register.inclusion_tag('wagtailadmin/shared/explorer_nav.html')
def explorer_nav():
return {
'nodes': get_navigation_menu_items()
}
@register.inclusion_tag('wagtailadmin/shared/explorer_nav_child.html')
def explorer_subnav(nodes):
return {
'nodes': nodes
}
@register.inclusion_tag('wagtailadmin/shared/main_nav.html', takes_context=True)
def main_nav(context):
request = context['request']
return {
'menu_html': admin_menu.render_html(request),
'request': request,
}
@register.simple_tag
def main_nav_js():
return admin_menu.media['js']
@register.filter("ellipsistrim")
def ellipsistrim(value, max_length):
    if len(value) > max_length:
        truncd_val = value[:max_length]
        # Trim back to the last space only when the cut lands mid-word.
        if value[max_length] != " ":
            truncd_val = truncd_val[:truncd_val.rfind(" ")]
        return truncd_val + "..."
    return value
@register.filter
def fieldtype(bound_field):
try:
return camelcase_to_underscore(bound_field.field.__class__.__name__)
except AttributeError:
try:
return camelcase_to_underscore(bound_field.__class__.__name__)
except AttributeError:
return ""
@register.filter
def widgettype(bound_field):
try:
return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)
except AttributeError:
try:
return camelcase_to_underscore(bound_field.widget.__class__.__name__)
except AttributeError:
return ""
@register.assignment_tag(takes_context=True)
def page_permissions(context, page):
"""
Usage: {% page_permissions page as page_perms %}
Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out
what actions the current logged-in user can perform on the given page.
"""
# Create a UserPagePermissionsProxy object to represent the user's global permissions, and
# cache it in the context for the duration of the page request, if one does not exist already
if 'user_page_permissions' not in context:
context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)
# Now retrieve a PagePermissionTester from it, specific to the given page
return context['user_page_permissions'].for_page(page)
@register.assignment_tag(takes_context=True)
def test_page_is_public(context, page):
"""
Usage: {% test_page_is_public page as is_public %}
Sets 'is_public' to True iff there are no page view restrictions in place on
this page.
Caches the list of page view restrictions in the context, to avoid repeated
DB queries on repeated calls.
"""
if 'all_page_view_restriction_paths' not in context:
context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)
is_private = any([
page.path.startswith(restricted_path)
for restricted_path in context['all_page_view_restriction_paths']
])
return not is_private
@register.simple_tag
def hook_output(hook_name):
"""
Example: {% hook_output 'insert_editor_css' %}
Whenever we have a hook whose functions take no parameters and return a string, this tag can be used
to output the concatenation of all of those return values onto the page.
Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.
"""
snippets = [fn() for fn in hooks.get_hooks(hook_name)]
return ''.join(snippets)
@register.assignment_tag
def usage_count_enabled():
return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)
@register.assignment_tag
def base_url_setting():
return getattr(settings, 'BASE_URL', None)
class EscapeScriptNode(template.Node):
TAG_NAME = 'escapescript'
def __init__(self, nodelist):
super(EscapeScriptNode, self).__init__()
self.nodelist = nodelist
def render(self, context):
out = self.nodelist.render(context)
return escape_script(out)
@classmethod
def handle(cls, parser, token):
nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))
parser.delete_first_token()
return cls(nodelist)
register.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)
# Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to
# take on the responsibility of rendering their own error messages
@register.filter
def render_with_errors(bound_field):
"""
Usage: {{ field|render_with_errors }} as opposed to {{ field }}.
If the field (a BoundField instance) has errors on it, and the associated widget implements
a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.
"""
widget = bound_field.field.widget
if bound_field.errors and hasattr(widget, 'render_with_errors'):
return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)
else:
return bound_field.as_widget()
@register.filter
def has_unrendered_errors(bound_field):
"""
Return true if this field has errors that were not accounted for by render_with_errors, because
the widget does not support the render_with_errors method
"""
return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')
@register.filter(is_safe=True)
@stringfilter
def cautious_slugify(value):
return _cautious_slugify(value)
@register.simple_tag(takes_context=True)
def querystring(context, **kwargs):
"""
Print out the current querystring. Any keyword arguments to this template
tag will be added to the querystring before it is printed out.
<a href="/page/{% querystring key='value' %}">
Will result in something like:
<a href="/page/?foo=bar&key=value">
"""
request = context['request']
querydict = request.GET.copy()
# Can't do querydict.update(kwargs), because QueryDict.update() appends to
# the list of values, instead of replacing the values.
for key, value in kwargs.items():
if value is None:
# Remove the key if the value is None
querydict.pop(key, None)
else:
# Set the key otherwise
querydict[key] = value
return '?' + querydict.urlencode()
@register.simple_tag(takes_context=True)
def pagination_querystring(context, page_number, page_key=DEFAULT_PAGE_KEY):
"""
Print out a querystring with an updated page number:
{% if page.has_next_page %}
<a href="{% pagination_link page.next_page_number %}">Next page</a>
{% endif %}
"""
return querystring(context, **{page_key: page_number})
@register.inclusion_tag("wagtailadmin/pages/listing/_pagination.html",
takes_context=True)
def paginate(context, page, base_url='', page_key=DEFAULT_PAGE_KEY,
classnames=''):
"""
Print pagination previous/next links, and the page count. Take the
following arguments:
page
The current page of results. This should be a Django pagination `Page`
instance
base_url
The base URL of the next/previous page, with no querystring.
This is optional, and defaults to the current page by just printing the
querystring for the next/previous page.
page_key
The name of the page variable in the query string. Defaults to the same
name as used in the :func:`~wagtail.utils.pagination.paginate`
function.
classnames
Extra classes to add to the next/previous links.
"""
request = context['request']
return {
'base_url': base_url,
'classnames': classnames,
'request': request,
'page': page,
'page_key': page_key,
'paginator': page.paginator,
}
|
fillycheezstake/MissionPlanner | refs/heads/master | Lib/ctypes/_endian.py | 51 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys
from ctypes import *
_array_type = type(c_int * 3)
def _other_endian(typ):
"""Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
attributes which contain the types, for more complicated types
only arrays are supported.
"""
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if type(typ) == _array_type:
return _other_endian(typ._type_) * typ._length_
raise TypeError("This type does not support other endian: %s" % typ)
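# On a little-endian interpreter, _other_endian(c_int) resolves to the
# big-endian c_int, and _other_endian(c_int * 3) swaps the element type
# while preserving the array length.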
class _swapped_meta(type(Structure)):
def __setattr__(self, attrname, value):
if attrname == "_fields_":
fields = []
for desc in value:
name = desc[0]
typ = desc[1]
rest = desc[2:]
fields.append((name, _other_endian(typ)) + rest)
value = fields
super(_swapped_meta, self).__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.
if sys.byteorder == "little":
_OTHER_ENDIAN = "__ctype_be__"
LittleEndianStructure = Structure
class BigEndianStructure(Structure):
"""Structure with big endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
elif sys.byteorder == "big":
_OTHER_ENDIAN = "__ctype_le__"
BigEndianStructure = Structure
class LittleEndianStructure(Structure):
"""Structure with little endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
else:
raise RuntimeError("Invalid byteorder")
|
talau/ns-3.18-wifi-queue-red | refs/heads/master | .waf-1.7.11-edc6ccb516c5e3f9b892efc9f53a610f/waflib/Tools/c_tests.py | 330 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task
from waflib.Configure import conf
from waflib.TaskGen import feature,before_method,after_method
import sys
LIB_CODE='''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllexport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void) { return 9; }
'''
MAIN_CODE='''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllimport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void);
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(lib_func() == 9);
}
'''
@feature('link_lib_test')
@before_method('process_source')
def link_lib_test_fun(self):
def write_test_file(task):
task.outputs[0].write(task.generator.code)
rpath=[]
if getattr(self,'add_rpath',False):
rpath=[self.bld.path.get_bld().abspath()]
mode=self.mode
m='%s %s'%(mode,mode)
ex=self.test_exec and'test_exec'or''
bld=self.bld
bld(rule=write_test_file,target='test.'+mode,code=LIB_CODE)
bld(rule=write_test_file,target='main.'+mode,code=MAIN_CODE)
bld(features='%sshlib'%m,source='test.'+mode,target='test')
bld(features='%sprogram %s'%(m,ex),source='main.'+mode,target='app',use='test',rpath=rpath)
@conf
def check_library(self,mode=None,test_exec=True):
if not mode:
mode='c'
if self.env.CXX:
mode='cxx'
self.check(compile_filename=[],features='link_lib_test',msg='Checking for libraries',mode=mode,test_exec=test_exec,)
INLINE_CODE='''
typedef int foo_t;
static %s foo_t static_foo () {return 0; }
%s foo_t foo () {
return 0;
}
'''
INLINE_VALUES=['inline','__inline__','__inline']
@conf
def check_inline(self,**kw):
self.start_msg('Checking for inline')
if not'define_name'in kw:
kw['define_name']='INLINE_MACRO'
if not'features'in kw:
if self.env.CXX:
kw['features']=['cxx']
else:
kw['features']=['c']
for x in INLINE_VALUES:
kw['fragment']=INLINE_CODE%(x,x)
try:
self.check(**kw)
except self.errors.ConfigurationError:
continue
else:
self.end_msg(x)
if x!='inline':
self.define('inline',x,quote=False)
return x
self.fatal('could not use inline functions')
LARGE_FRAGMENT='''#include <unistd.h>
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(sizeof(off_t) >= 8);
}
'''
@conf
def check_large_file(self,**kw):
if not'define_name'in kw:
kw['define_name']='HAVE_LARGEFILE'
if not'execute'in kw:
kw['execute']=True
if not'features'in kw:
if self.env.CXX:
kw['features']=['cxx','cxxprogram']
else:
kw['features']=['c','cprogram']
kw['fragment']=LARGE_FRAGMENT
kw['msg']='Checking for large file support'
ret=True
try:
if self.env.DEST_BINFMT!='pe':
ret=self.check(**kw)
except self.errors.ConfigurationError:
pass
else:
if ret:
return True
kw['msg']='Checking for -D_FILE_OFFSET_BITS=64'
kw['defines']=['_FILE_OFFSET_BITS=64']
try:
ret=self.check(**kw)
except self.errors.ConfigurationError:
pass
else:
self.define('_FILE_OFFSET_BITS',64)
return ret
self.fatal('There is no support for large files')
ENDIAN_FRAGMENT='''
short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
int use_ascii (int i) {
return ascii_mm[i] + ascii_ii[i];
}
short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
int use_ebcdic (int i) {
return ebcdic_mm[i] + ebcdic_ii[i];
}
extern int foo;
'''
class grep_for_endianness(Task.Task):
color='PINK'
def run(self):
txt=self.inputs[0].read(flags='rb').decode('iso8859-1')
if txt.find('LiTTleEnDian')>-1:
self.generator.tmp.append('little')
elif txt.find('BIGenDianSyS')>-1:
self.generator.tmp.append('big')
else:
return-1
@feature('grep_for_endianness')
@after_method('process_source')
def grep_for_endianness_fun(self):
self.create_task('grep_for_endianness',self.compiled_tasks[0].outputs[0])
@conf
def check_endianness(self):
tmp=[]
def check_msg(self):
return tmp[0]
self.check(fragment=ENDIAN_FRAGMENT,features='c grep_for_endianness',msg="Checking for endianness",define='ENDIANNESS',tmp=tmp,okmsg=check_msg)
return tmp[0]
|
jpetto/bedrock | refs/heads/master | bedrock/tabzilla/models.py | 1447 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
lokeshjindal15/pd-gem5 | refs/heads/master | src/arch/x86/isa/insts/general_purpose/logical.py | 89 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop OR_R_R
{
or reg, reg, regm, flags=(OF,SF,ZF,PF,CF)
};
def macroop OR_M_I
{
limm t2, imm
ldst t1, seg, sib, disp
or t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
st t1, seg, sib, disp
};
def macroop OR_P_I
{
limm t2, imm
rdip t7
ldst t1, seg, riprel, disp
or t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
st t1, seg, riprel, disp
};
def macroop OR_LOCKED_M_I
{
limm t2, imm
mfence
ldstl t1, seg, sib, disp
or t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, sib, disp
mfence
};
def macroop OR_LOCKED_P_I
{
limm t2, imm
rdip t7
mfence
ldstl t1, seg, riprel, disp
or t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, riprel, disp
mfence
};
def macroop OR_M_R
{
ldst t1, seg, sib, disp
or t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
st t1, seg, sib, disp
};
def macroop OR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
or t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
st t1, seg, riprel, disp
};
def macroop OR_LOCKED_M_R
{
mfence
ldstl t1, seg, sib, disp
or t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, sib, disp
mfence
};
def macroop OR_LOCKED_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
or t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, riprel, disp
mfence
};
def macroop OR_R_M
{
ld t1, seg, sib, disp
or reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop OR_R_P
{
rdip t7
ld t1, seg, riprel, disp
or reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop OR_R_I
{
limm t1, imm
or reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop XOR_R_R
{
xor reg, reg, regm, flags=(OF,SF,ZF,PF,CF)
};
def macroop XOR_R_I
{
limm t1, imm
xor reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop XOR_M_I
{
limm t2, imm
ldst t1, seg, sib, disp
xor t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
st t1, seg, sib, disp
};
def macroop XOR_P_I
{
limm t2, imm
rdip t7
ldst t1, seg, riprel, disp
xor t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
st t1, seg, riprel, disp
};
def macroop XOR_LOCKED_M_I
{
limm t2, imm
mfence
ldstl t1, seg, sib, disp
xor t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, sib, disp
mfence
};
def macroop XOR_LOCKED_P_I
{
limm t2, imm
rdip t7
mfence
ldstl t1, seg, riprel, disp
xor t1, t1, t2, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, riprel, disp
mfence
};
def macroop XOR_M_R
{
ldst t1, seg, sib, disp
xor t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
st t1, seg, sib, disp
};
def macroop XOR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
xor t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
st t1, seg, riprel, disp
};
def macroop XOR_LOCKED_M_R
{
mfence
ldstl t1, seg, sib, disp
xor t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, sib, disp
mfence
};
def macroop XOR_LOCKED_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
xor t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, riprel, disp
mfence
};
def macroop XOR_R_M
{
ld t1, seg, sib, disp
xor reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop XOR_R_P
{
rdip t7
ld t1, seg, riprel, disp
xor reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop AND_R_R
{
and reg, reg, regm, flags=(OF,SF,ZF,PF,CF)
};
def macroop AND_R_M
{
ld t1, seg, sib, disp
and reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop AND_R_P
{
rdip t7
ld t1, seg, riprel, disp
and reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop AND_R_I
{
limm t1, imm
and reg, reg, t1, flags=(OF,SF,ZF,PF,CF)
};
def macroop AND_M_I
{
ldst t2, seg, sib, disp
limm t1, imm
and t2, t2, t1, flags=(OF,SF,ZF,PF,CF)
st t2, seg, sib, disp
};
def macroop AND_P_I
{
rdip t7
ldst t2, seg, riprel, disp
limm t1, imm
and t2, t2, t1, flags=(OF,SF,ZF,PF,CF)
st t2, seg, riprel, disp
};
def macroop AND_LOCKED_M_I
{
mfence
ldstl t2, seg, sib, disp
limm t1, imm
and t2, t2, t1, flags=(OF,SF,ZF,PF,CF)
stul t2, seg, sib, disp
mfence
};
def macroop AND_LOCKED_P_I
{
rdip t7
mfence
ldstl t2, seg, riprel, disp
limm t1, imm
and t2, t2, t1, flags=(OF,SF,ZF,PF,CF)
stul t2, seg, riprel, disp
mfence
};
def macroop AND_M_R
{
ldst t1, seg, sib, disp
and t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
st t1, seg, sib, disp
};
def macroop AND_P_R
{
rdip t7
ldst t1, seg, riprel, disp
and t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
st t1, seg, riprel, disp
};
def macroop AND_LOCKED_M_R
{
mfence
ldstl t1, seg, sib, disp
and t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, sib, disp
mfence
};
def macroop AND_LOCKED_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
and t1, t1, reg, flags=(OF,SF,ZF,PF,CF)
stul t1, seg, riprel, disp
mfence
};
def macroop NOT_R
{
limm t1, -1
xor reg, reg, t1
};
def macroop NOT_M
{
limm t1, -1
ldst t2, seg, sib, disp
xor t2, t2, t1
st t2, seg, sib, disp
};
def macroop NOT_P
{
limm t1, -1
rdip t7
ldst t2, seg, riprel, disp
xor t2, t2, t1
st t2, seg, riprel, disp
};
def macroop NOT_LOCKED_M
{
limm t1, -1
mfence
ldstl t2, seg, sib, disp
xor t2, t2, t1
stul t2, seg, sib, disp
mfence
};
def macroop NOT_LOCKED_P
{
limm t1, -1
rdip t7
mfence
ldstl t2, seg, riprel, disp
xor t2, t2, t1
stul t2, seg, riprel, disp
mfence
};
'''
|
ella/ella-polls | refs/heads/master | test_ella_polls/urls.py | 1 | from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^polls', include('ella_polls.urls')),
(r'^', include('ella.core.urls')),
)
|
movicha/dcos | refs/heads/master | packages/spartan/extra/gen_resolvconf.py | 1 | #!/opt/mesosphere/bin/python
import os
import socket
import sys
import random
import dns.exception
import dns.message
import dns.query
import dns.rdatatype
# Constants
MAX_SERVER_COUNT = 3
SPARTANS = ['198.51.100.1', '198.51.100.2', '198.51.100.3']
if len(sys.argv) != 2:
print('Usage: gen_resolvconf.py RESOLV_CONF_PATH', file=sys.stderr)
print('Received: {}'.format(sys.argv), file=sys.stderr)
sys.exit(-1)
resolvconf_path = sys.argv[1]
dns_test_query = 'ready.spartan'
dns_timeout = 5
def check_server(addr):
try:
query = dns.message.make_query(dns_test_query, dns.rdatatype.ANY)
result = dns.query.udp(query, addr, dns_timeout)
if len(result.answer) == 0:
print('Skipping DNS server {}: no records for {}'.format(
addr, dns_test_query), file=sys.stderr)
else:
return True
except socket.gaierror as ex:
print(ex, file=sys.stderr)
except dns.exception.Timeout:
print('Skipping DNS server {}: no response'.format(
addr), file=sys.stderr)
    except Exception:
        print("Unexpected error querying DNS for server \"{}\" exception: {}".format(
            addr, sys.exc_info()[1]), file=sys.stderr)
return False
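# check_server() treats a resolver as healthy only when it answers the canary
# query for 'ready.spartan' with at least one record within dns_timeout seconds.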
contents = """# Generated by gen_resolvconf.py. Do not edit.
# Change configuration options by changing DC/OS cluster configuration.
# This file must be overwritten regularly for proper cluster operation around
# master failure.
options timeout:1
options attempts:3
"""
if 'SEARCH' in os.environ:
contents += "search {}\n".format(os.environ['SEARCH'])
# Check if Spartan is up
spartans_up = []
for ns in SPARTANS:
if check_server(ns):
spartans_up.append(ns)
if len(spartans_up) > 0:
for ns in spartans_up:
contents += "nameserver {}\n".format(ns)
# If Spartan is not up, fall back, and insert the upstreams
else:
fallback_servers = os.environ['RESOLVERS'].split(',')
random.shuffle(fallback_servers)
for ns in fallback_servers[:MAX_SERVER_COUNT]:
contents += "nameserver {}\n".format(ns)
# Generate the resolv.conf config
print('Updating {}'.format(resolvconf_path))
with open(resolvconf_path, 'w') as f:
print(contents, file=sys.stderr)
f.write(contents)
sys.exit(0)
|
lycanthia/Find7-Kernel-Source-4.3 | refs/heads/master | tools/perf/util/setup.py | 4998 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
AlanZheng/heekscnc | refs/heads/master | pycnc/Operation.py | 24 | from Object import Object
from consts import *
import HeeksCNC
from CNCConfig import CNCConfig
class Operation(Object):
def __init__(self):
Object.__init__(self)
self.active = True
self.comment = ''
self.title = self.TypeName()
self.tool_number = 0
def TypeName(self):
return "Operation"
def icon(self):
# the name of the PNG file in the HeeksCNC icons folder
if self.active:
return self.op_icon()
else:
return "noentry"
def CanBeDeleted(self):
return True
def UsesTool(self): # some operations don't use the tool number
return True
def ReadDefaultValues(self):
config = CNCConfig()
self.tool_number = config.ReadInt("OpTool", 0)
if self.tool_number != 0:
default_tool = HeeksCNC.program.tools.FindTool(self.tool_number)
if default_tool == None:
self.tool_number = 0
else:
self.tool_number = default_tool.tool_number
if self.tool_number == 0:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_SLOTCUTTER)
if first_tool:
self.tool_number = first_tool.tool_number
else:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_ENDMILL)
if first_tool:
self.tool_number = first_tool.tool_number
else:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_BALLENDMILL)
if first_tool:
self.tool_number = first_tool.tool_number
def WriteDefaultValues(self):
config = CNCConfig()
if self.tool_number != 0:
config.WriteInt("OpTool", self.tool_number)
def AppendTextToProgram(self):
if len(self.comment) > 0:
HeeksCNC.program.python_program += "comment(" + self.comment + ")\n"
if self.UsesTool():
HeeksCNC.machine_state.AppendToolChangeText(self.tool_number) # Select the correct tool.
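# A minimal subclass sketch (hypothetical: "DrillOp", the "drill" icon name,
# and the drill() call are illustrative, not part of HeeksCNC): concrete
# operations override TypeName() and op_icon(), and extend
# AppendTextToProgram() after the base class emits the comment and tool change.
class DrillOp(Operation):
    def TypeName(self):
        return "DrillOp"
    def op_icon(self):
        return "drill"
    def AppendTextToProgram(self):
        Operation.AppendTextToProgram(self)
        HeeksCNC.program.python_program += "drill()\n"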
|
jdar/phantomjs-modified | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/filters/webkit_extras.py | 121 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from django.template.defaultfilters import stringfilter
from google.appengine.ext import webapp
register = webapp.template.create_template_register()
bug_regexp = re.compile(r"bug (?P<bug_id>\d+)")
patch_regexp = re.compile(r"patch (?P<patch_id>\d+)")
@register.filter
@stringfilter
def webkit_linkify(value):
value = bug_regexp.sub(r'<a href="http://webkit.org/b/\g<bug_id>">bug \g<bug_id></a>', value)
value = patch_regexp.sub(r'<a href="https://bugs.webkit.org/attachment.cgi?id=\g<patch_id>&action=prettypatch">patch \g<patch_id></a>', value)
return value
@register.filter
@stringfilter
def webkit_bug_id(value):
return '<a href="http://webkit.org/b/%s">%s</a>' % (value, value)
@register.filter
@stringfilter
def webkit_attachment_id(value):
return '<a href="https://bugs.webkit.org/attachment.cgi?id=%s&action=prettypatch">%s</a>' % (value, value)
@register.filter
@stringfilter
def results_link(status_id):
return '<a href="/results/%s">results</a>' % status_id
@register.filter
@stringfilter
def queue_status_link(queue_name, text):
return '<a href="/queue-status/%s">%s</a>' % (queue_name, text)
@register.filter
@stringfilter
def queue_charts_link(queue_name, text):
return '<a href="/queue-charts/%s">%s</a>' % (queue_name, text)
|
Dev-Cloud-Platform/Dev-Cloud | refs/heads/master | dev_cloud/web_service/__init__.py | 4 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
|
RJVB/audacity | refs/heads/master | lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/cs.py | 133 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
import os,tempfile
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
cs_nodes=[]
no_nodes=[]
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source=no_nodes
bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
tsk.env.CSTYPE='/target:%s'%bintype
tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
if inst_to:
mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
names=self.to_list(getattr(self,'use',[]))
get=self.bld.get_tgen_by_name
for x in names:
try:
y=get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS','/reference:%s'%x)
continue
y.post()
tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r'%self)
self.cs_task.dep_nodes.extend(tsk.outputs)
self.cs_task.set_run_after(tsk)
self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
if not csdebug:
return
node=self.cs_task.outputs[0]
if self.env.CS_NAME=='mono':
out=node.parent.find_or_declare(node.name+'.mdb')
else:
out=node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug=='pdbonly':
val=['/debug+','/debug:pdbonly']
elif csdebug=='full':
val=['/debug+','/debug:full']
else:
val=['/debug-']
self.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
color='YELLOW'
run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def exec_command(self,cmd,**kw):
bld=self.generator.bld
try:
if not kw.get('cwd',None):
kw['cwd']=bld.cwd
except AttributeError:
bld.cwd=kw['cwd']=bld.variant_dir
try:
tmp=None
if isinstance(cmd,list)and len(' '.join(cmd))>=8192:
program=cmd[0]
cmd=[self.quote_response_command(x)for x in cmd]
(fd,tmp)=tempfile.mkstemp()
os.write(fd,'\r\n'.join(i.replace('\\','\\\\')for i in cmd[1:]))
os.close(fd)
cmd=[program,'@'+tmp]
ret=self.generator.bld.exec_command(cmd,**kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
def quote_response_command(self,flag):
if flag.lower()=='/noconfig':
return''
if flag.find(' ')>-1:
for x in('/r:','/reference:','/resource:','/lib:','/out:'):
if flag.startswith(x):
flag='%s"%s"'%(x,flag[len(x):])
break
else:
flag='"%s"'%flag
return flag
def configure(conf):
csc=getattr(Options.options,'cscbinary',None)
if csc:
conf.env.MCS=csc
conf.find_program(['csc','mcs','gmcs'],var='MCS')
conf.env.ASS_ST='/r:%s'
conf.env.RES_ST='/resource:%s'
conf.env.CS_NAME='csc'
if str(conf.env.MCS).lower().find('mcs')>-1:
conf.env.CS_NAME='mono'
def options(opt):
opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
color='YELLOW'
inst_to=None
def runnable_status(self):
for x in self.outputs:
x.sig=Utils.h_file(x.abspath())
return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
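# A minimal wscript sketch for the 'cs' tool above (target and source names
# are illustrative): a 'gen' ending in '.dll' builds a library, anything else
# an executable, and 'use' pulls in sibling assemblies or bare references.
#
# def options(opt):
#     opt.load('cs')
# def configure(conf):
#     conf.load('cs')
# def build(bld):
#     bld(features='cs', source='main.cs helper.cs', gen='hello.exe',
#         use='mylib', csdebug='full')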
|
OSU-slatelab/specGAN | refs/heads/master | data_io.py | 1 | """
Functions for dealing with data input and output.
"""
from os import path
import _pickle as pickle
import gzip
import logging
import numpy as np
import struct
logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------#
# GENERAL I/O FUNCTIONS #
#-----------------------------------------------------------------------------#
def smart_open(filename, mode=None):
"""Opens a file normally or using gzip based on the extension."""
if path.splitext(filename)[-1] == ".gz":
if mode is None:
mode = "rb"
return gzip.open(filename, mode)
else:
if mode is None:
mode = "r"
return open(filename, mode)
def np_from_text(text_fn, phonedict, txt_base_dir=""):
ark_dict = {}
with open(text_fn) as f:
for line in f:
if line == "":
continue
utt_id = line.replace("\n", "").split(" ")[0]
text = line.replace("\n", "").split(" ")[1:]
rows = len(text)
#cols = 51
utt_mat = np.zeros((rows))
for i in range(len(text)):
utt_mat[i] = phonedict[text[i]]
ark_dict[utt_id] = utt_mat
return ark_dict
def read_kaldi_ark_from_scp(uid, offset, batch_size, buffer_size, scp_fn, ark_base_dir=""):
"""
Read a binary Kaldi archive and return a dict of Numpy matrices, with the
utterance IDs of the SCP as keys. Based on the code:
https://github.com/yajiemiao/pdnn/blob/master/io_func/kaldi_feat.py
    Parameters
    ----------
    uid : int
        Number of SCP lines already consumed; the first `uid` lines are skipped.
    offset : int
        Frames already buffered; reading stops once batch_size*buffer_size-offset
        frames have been accumulated.
    ark_base_dir : str
        The base directory for the archives to which the SCP points.
"""
ark_dict = {}
totframes = 0
lines = 0
with open(scp_fn) as f:
for line in f:
lines = lines + 1
if lines<=uid:
continue
if line == "":
continue
utt_id, path_pos = line.replace("\n", "").split(" ")
ark_path, pos = path_pos.split(":")
ark_path = path.join(ark_base_dir, ark_path)
ark_read_buffer = smart_open(ark_path, "rb")
ark_read_buffer.seek(int(pos),0)
header = struct.unpack("<xcccc", ark_read_buffer.read(5))
            assert header[0] == b"B", "Input .ark file is not binary"
rows = 0
cols = 0
m,rows = struct.unpack("<bi", ark_read_buffer.read(5))
n,cols = struct.unpack("<bi", ark_read_buffer.read(5))
tmp_mat = np.frombuffer(ark_read_buffer.read(rows*cols*4), dtype=np.float32)
utt_mat = np.reshape(tmp_mat, (rows, cols))
#utt_mat_list=utt_mat.tolist()
ark_read_buffer.close()
ark_dict[utt_id] = utt_mat
totframes += rows
if totframes>=(batch_size*buffer_size-offset):
break
return ark_dict,lines
def kaldi_write_mats(ark_path, utt_id, utt_mat):
ark_write_buf = smart_open(ark_path, "ab")
utt_mat = np.asarray(utt_mat, dtype=np.float32)
    rows, cols = utt_mat.shape
    if isinstance(utt_id, str):  # struct.pack needs bytes under Python 3
        utt_id = utt_id.encode("utf-8")
    ark_write_buf.write(struct.pack('<%ds'%(len(utt_id)), utt_id))
ark_write_buf.write(struct.pack('<cxcccc', b' ',b'B',b'F',b'M',b' '))
ark_write_buf.write(struct.pack('<bi', 4, rows))
ark_write_buf.write(struct.pack('<bi', 4, cols))
    ark_write_buf.write(utt_mat)
    ark_write_buf.close()  # flush the appended record
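# Hedged usage sketch (paths and batch sizes are illustrative, not from the
# source): stream one buffered batch of features from an SCP and copy each
# utterance matrix to a new archive.
if __name__ == "__main__":
    feats, lines_read = read_kaldi_ark_from_scp(
        uid=0, offset=0, batch_size=256, buffer_size=10,
        scp_fn="data/train/feats.scp")
    for utt_id, utt_mat in feats.items():
        kaldi_write_mats("exp/copy.ark", utt_id, utt_mat)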
|
DoWhatILove/turtle | refs/heads/master | programming/python/library/scikit-learn/knn/tryIt.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 2 23:22:37 2018
@author: davch
"""
#%%
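# The file body is empty in the source; the sketch below is a hypothetical
# illustration consistent with the path (scikit-learn/knn), not the author's
# original code.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
print("test accuracy: {:.3f}".format(knn.score(X_test, y_test)))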
|